| source | python |
|---|---|
main.py
|
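# MALDDAL GUI entry point: grabs the game window, runs OCR on it, and shows the
# recognized script/spec text in a Tkinter window.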
import malddal
import tkinter as tk
from threading import Thread, Semaphore
from tkinter import messagebox
global version
version = "v0.8.5"
global directory
directory = r'%systemdrive%\user\%username%\desktop'
if __name__ == '__main__':
flag = False
mymalddal = malddal.malddal()
flagSem = mymalddal.getflagsem()
gamepub, resize, is_top, korean_mode = mymalddal.getGameFrom()
windowflag = False
print(gamepub)
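# gamepub == 0 selects the DMM (PC) client window; any other value targets the
# BlueStacks emulator window.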
if gamepub == 0:
hwnd = mymalddal.getHwndOfDMM()
else:
hwnd = mymalddal.getHwndOfBluestacks()
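# If a resize was requested, request_admin_and_resize presumably handles it
# (likely with elevated rights) and this instance simply exits afterwards.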
if resize:
mymalddal.request_admin_and_resize(hwnd)
exit(0)
charScriptSpec, charScript, charSpec, charIter, charIter2 = mymalddal.read_script(korean_mode)
charSkillSpec, charSkill, charSpecOfSkill = mymalddal.read_skill()
window = tk.Tk()
window.title("MALDDAL - DCInside 우마무스메 갤러리 " + version)
window.minsize(500, 500)
window.iconbitmap('malddal.ico')
if is_top:
window.attributes("-topmost", 1)
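# Signal the OCR thread to stop (under the semaphore), wait for it to finish,
# then tear down the Tk window.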
def EXIT():
flagSem.acquire()
global flag
flag = True
flagSem.release()
ocrThread.join()
window.destroy()
exit(0)
window.protocol('WM_DELETE_WINDOW', EXIT)
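# Called from the OCR thread: if the exit flag has been set, terminate the thread.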
def EXITCheck():
flagSem.acquire()
global flag
if flag:
print("PROGRAM KILL DETECTED")
exit(0)
flagSem.release()
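# StringVars backing the five script/spec label pairs; they read "N/A" until the
# OCR loop fills them in.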
scriptText = []
specText = []
message = tk.StringVar()
for i in range(5):
text = tk.StringVar()
text2 = tk.StringVar()
text.set("N/A")
text2.set("N/A")
scriptText.append(text)
specText.append(text2)
script0 = tk.Label(window, textvariable=scriptText[0], height=5, width=35, relief="groove")
script1 = tk.Label(window, textvariable=scriptText[1], height=5, width=35, relief="groove")
script2 = tk.Label(window, textvariable=scriptText[2], height=5, width=35, relief="groove")
script3 = tk.Label(window, textvariable=scriptText[3], height=5, width=35, relief="groove")
script4 = tk.Label(window, textvariable=scriptText[4], height=5, width=35, relief="groove")
script5 = tk.Label(window)
spec0 = tk.Label(window, textvariable=specText[0], height=5, width=50, relief="groove")
spec0.bind("<Button-1>", lambda e: mymalddal.get_skill_info(specText[0], charSkillSpec, charSkill, charSpecOfSkill))
spec1 = tk.Label(window, textvariable=specText[1], height=5, width=50, relief="groove")
spec1.bind("<Button-1>", lambda e: mymalddal.get_skill_info(specText[1], charSkillSpec, charSkill, charSpecOfSkill))
spec2 = tk.Label(window, textvariable=specText[2], height=5, width=50, relief="groove")
spec2.bind("<Button-1>", lambda e: mymalddal.get_skill_info(specText[2], charSkillSpec, charSkill, charSpecOfSkill))
spec3 = tk.Label(window, textvariable=specText[3], height=5, width=50, relief="groove")
spec3.bind("<Button-1>", lambda e: mymalddal.get_skill_info(specText[3], charSkillSpec, charSkill, charSpecOfSkill))
spec4 = tk.Label(window, textvariable=specText[4], height=5, width=50, relief="groove")
spec4.bind("<Button-1>", lambda e: mymalddal.get_skill_info(specText[4], charSkillSpec, charSkill, charSpecOfSkill))
spec5 = tk.Label(window, textvariable=message)
def directory_button_click(dir):
global directory
directory = mymalddal.get_directory(dir)
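# The UI strings below are Korean: "이미지 저장경로 변경" = change image save path,
# "게임 캡쳐" = capture the game, "저장 실패" = save failed, "저장 완료" = save complete.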
button0 = tk.Button(window, text="이미지 저장경로 변경",
command=lambda: directory_button_click(directory), width=20, height=3)
def imagecapture(directory):
rgb, temp = mymalddal.getWindowsImage(hwnd, gamepub)
if mymalddal.game_capture(rgb, directory):
message.set("저장 실패")
window.after(2000, lambda: message.set(""))
else:
message.set(directory + " <- 저장 완료")
window.after(2000, lambda: message.set(""))
button1 = tk.Button(window, text="게임 캡쳐",
command=lambda: imagecapture(directory), width=20, height=3)
script0.grid(row=0, column=0)
script1.grid(row=1, column=0)
script2.grid(row=2, column=0)
script3.grid(row=3, column=0)
script4.grid(row=4, column=0)
script5.grid(row=5, column=0)
button0.grid(row=6, column=0)
spec0.grid(row=0, column=1)
spec1.grid(row=1, column=1)
spec2.grid(row=2, column=1)
spec3.grid(row=3, column=1)
spec4.grid(row=4, column=1)
spec5.grid(row=5, column=1)
button1.grid(row=6, column=1)
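# OCR polling loop, run on a background thread: capture the window, OCR it, and
# update the label StringVars whenever the recognized text changes.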
def mainloop():
lastPrinted = 99999
while True:
EXITCheck()
global image
global cap
image, cap = mymalddal.getWindowsImage(hwnd, gamepub)
script, spec, printed = mymalddal.OCR(cap, lastPrinted, charScriptSpec, charScript, charSpec, charIter,
charIter2)
if lastPrinted == printed:
continue
lastPrinted = printed
scriptIter = 0
EXITCheck()
for sc in script:
flagSem.acquire()
scriptText[scriptIter].set(sc)
flagSem.release()
scriptIter = scriptIter + 1
for sc in range(scriptIter, 5):
flagSem.acquire()
scriptText[sc].set("N/A")
flagSem.release()
scriptIter = 0
for sp in spec:
flagSem.acquire()
specText[scriptIter].set(sp)
flagSem.release()
scriptIter = scriptIter + 1
for sc in range(scriptIter, 5):
flagSem.acquire()
specText[sc].set("N/A")
flagSem.release()
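# Start the OCR loop on a background thread, then hand control to the Tk event loop.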
ocrThread = Thread(target=mainloop)
ocrThread.start()
window.mainloop()
|
test_logging.py
|
# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import shutil
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
import smtpd
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = threading_helper.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
threading_helper.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args, encoding="utf-8")
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt', encoding='utf-8'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self._quit = True
threading_helper.join_thread(self._thread)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
threading_helper.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
threading_helper.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
kwargs={{"encoding": "utf-8"}}
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, encoding="utf-8", **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# Replace single backslash with double backslash in windows
# to avoid unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
encoding="utf-8",
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
def test_udp_reconnection(self):
logger = logging.getLogger("slh")
self.sl_hdlr.close()
self.handled.clear()
logger.error("sp\xe4m")
self.handled.wait(0.1)
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
            except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
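        # HTTPHandler sends the record's attributes URL-encoded: in the query
        # string for GET requests and in the request body for POST requests.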
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # Historically (Python 2.x), a plain file object was treated as having
        # no encoding, so an explicit encoding is passed to the handler here.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: no handlers are attached to the logger
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
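    # A '()' key names a factory (a callable, or a string resolving to one)
    # which dictConfig calls to build the object; the remaining keys are
    # passed to it as keyword arguments.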
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
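    # config9 sets both the handler and the logger to WARNING; config9a and
    # config9b are incremental configs, which can only adjust the levels (and
    # propagate settings) of existing handlers and loggers.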
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
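    # cfg:// values are resolved against this configuration dictionary itself,
    # e.g. 'cfg://handler_configs[hand1]' yields the mapping stored under
    # 'handler_configs'.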
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
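    # The special '.' key maps attribute names to values that dictConfig sets
    # on the handler after construction (here 'foo' and 'terminator').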
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
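    # out_of_order: the MemoryHandler's 'target' refers to another handler by
    # name, and its formatter pairs a %-style format string with the '$' style,
    # so applying it unchanged should fail (see the test_out_of_order* tests).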
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    def test_config2_failure(self):
        # A config with a bad stream reference ('ext://sys.stdbout') fails.
        self.assertRaises(Exception, self.apply_config, self.config2)
    def test_config2a_failure(self):
        # A config with a misspelt handler level fails.
        self.assertRaises(Exception, self.apply_config, self.config2a)
    def test_config2b_failure(self):
        # A config with a misspelt root logger level fails.
        self.assertRaises(Exception, self.apply_config, self.config2b)
    def test_config3_failure(self):
        # A config referencing an undefined formatter fails.
        self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn,
"encoding": "utf-8",
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
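            # logging.config.listen() expects each configuration to be preceded
            # by a four-byte big-endian length prefix (struct format '>L').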
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
threading_helper.join_thread(t)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
        # Deep-copy so the shared class-level config dict is not mutated.
        config = copy.deepcopy(self.custom_formatter_class_validate)
        config['formatters']['form1']['style'] = "$"
        # No exception should be raised, as 'validate' is set to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
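        # QueueHandler.prepare() merges args into msg and clears args/exc_info
        # so the record can be pickled, hence args is None here.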
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
# Test that traceback only appends once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
        for i in range(self.repeat):
            log_queue = multiprocessing.Queue()
            self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
            # time.sleep(1)
            items = list(self.get_all_from_queue(log_queue))
            log_queue.close()
            log_queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class AssertErrorMessage:
def assert_error_message(self, exception, message, *args, **kwargs):
try:
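            # assertRaises() is given an empty exception tuple, so it catches
            # nothing and the real exception propagates to the except clause
            # below, where its message can be checked.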
self.assertRaises((), *args, **kwargs)
except exception as e:
self.assertEqual(message, str(e))
class FormatterTest(unittest.TestCase, AssertErrorMessage):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
'custom': {
'custom': 1234
}
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
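        # Formatter validates the format string at construction time
        # (validate=True by default since Python 3.8); these should all pass.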
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
        # Test that ValueError is raised for incorrect format strings
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid format: invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
        # Testing failure for invalid conversion
        self.assert_error_message(
            ValueError,
            "invalid format: invalid conversion: 'Z'",
            logging.Formatter, '{asctime!Z:15}', style='{'
        )
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"invalid format: bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
        # Testing failure for mismatched braces
self.assert_error_message(
ValueError,
"invalid format: expected '}' before end of string",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: Single '}' encountered in format string",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
        # Testing failure for a bare '$'
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
        # Testing failure for mismatched style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_defaults_parameter(self):
fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
styles = ['%', '{', '$']
for fmt, style in zip(fmts, styles):
f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
r = self.get_record()
self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
# Without default
f = logging.Formatter(fmt, style=style)
r = self.get_record()
self.assertRaises(ValueError, f.format, r)
# Non-existing default is ignored
f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
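        # shutdown() flushes and closes handlers in reverse order of registration.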
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
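# Note on the expected call order above: logging.shutdown() walks the supplied
# handler list in reverse (newest handler first), acquiring, flushing, closing
# and finally releasing each one; it holds weak references, so handlers that
# have already been garbage-collected are skipped. OSError/ValueError raised
# while flushing or closing are always swallowed; any other error is swallowed
# only when logging.raiseExceptions is false, as the tests above verify.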
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# bpo-20037: Doing text I/O late at interpreter shutdown must not crash
code = textwrap.dedent("""
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()
""")
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_logging_at_shutdown_open(self):
# bpo-26789: FileHandler keeps a reference to the builtin open()
# function to be able to open or reopen the file during Python
# finalization.
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
code = textwrap.dedent(f"""
import builtins
import logging
class A:
def __del__(self):
logging.error("log in __del__")
# basicConfig() opens the file, but logging.shutdown() closes
# it at Python exit. When A.__del__() is called,
# FileHandler._open() must be called again to re-open the file.
logging.basicConfig(filename={filename!r}, encoding="utf-8")
a = A()
# Simulate the Python finalization which removes the builtin
# open() function.
del builtins.open
""")
assert_python_ok("-c", code)
with open(filename, encoding="utf-8") as fp:
self.assertEqual(fp.read().rstrip(), "ERROR:root:log in __del__")
def test_recursion_error(self):
# Issue 36272
code = textwrap.dedent("""
import logging
def rec():
logging.error("foo")
rec()
rec()
""")
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
def test_get_level_names_mapping(self):
mapping = logging.getLevelNamesMapping()
self.assertEqual(logging._nameToLevel, mapping) # value is equivalent
self.assertIsNot(logging._nameToLevel, mapping) # but not the internal data
new_mapping = logging.getLevelNamesMapping() # another call -> another copy
self.assertIsNot(mapping, new_mapping) # verify not the same object as before
self.assertEqual(mapping, new_mapping) # but equivalent in value
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
@staticmethod # pickled as target of child process in the following test
def _extract_logrecord_process_name(key, logMultiprocessing, conn=None):
prev_logMultiprocessing = logging.logMultiprocessing
logging.logMultiprocessing = logMultiprocessing
try:
import multiprocessing as mp
name = mp.current_process().name
r1 = logging.makeLogRecord({'msg': f'msg1_{key}'})
# https://bugs.python.org/issue45128
with support.swap_item(sys.modules, 'multiprocessing', None):
r2 = logging.makeLogRecord({'msg': f'msg2_{key}'})
results = {'processName' : name,
'r1.processName': r1.processName,
'r2.processName': r2.processName,
}
finally:
logging.logMultiprocessing = prev_logMultiprocessing
if conn:
conn.send(results)
else:
return results
def test_multiprocessing(self):
multiprocessing_imported = 'multiprocessing' in sys.modules
try:
# logMultiprocessing is True by default
self.assertEqual(logging.logMultiprocessing, True)
LOG_MULTI_PROCESSING = True
# When logMultiprocessing == True:
# In the main process processName = 'MainProcess'
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
results = self._extract_logrecord_process_name(1, LOG_MULTI_PROCESSING)
self.assertEqual('MainProcess', results['processName'])
self.assertEqual('MainProcess', results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
            # In other processes, processName is correct when multiprocessing is imported,

# but it is (incorrectly) defaulted to 'MainProcess' otherwise (bpo-38762).
import multiprocessing
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(
target=self._extract_logrecord_process_name,
args=(2, LOG_MULTI_PROCESSING, child_conn,)
)
p.start()
results = parent_conn.recv()
self.assertNotEqual('MainProcess', results['processName'])
self.assertEqual(results['processName'], results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
p.join()
finally:
if multiprocessing_imported:
import multiprocessing
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
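# The three module-level switches exercised above (logging.logThreads,
# logging.logProcesses and logging.logMultiprocessing) control whether a
# LogRecord collects thread, process and multiprocessing information; when a
# switch is false, the corresponding attributes are simply set to None, which
# avoids the lookup cost for applications that never format those fields.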
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', encoding='utf-8')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a', encoding='utf-8')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
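# _test_log above verifies the documented convenience behaviour: the
# module-level helpers (logging.debug/info/.../critical and logging.log) call
# basicConfig() with no arguments when the root logger has no handlers, so a
# bare logging.warning(...) in a script still produces output on stderr.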
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest, AssertErrorMessage):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: None',
self.logger.setLevel, None)
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: (0, 0)',
self.logger.setLevel, (0, 0))
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
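# Summary of the caching behaviour verified above: Logger.isEnabledFor()
# caches its answer per level in logger._cache, and the caches of all loggers
# are cleared whenever a level changes (setLevel) or logging.disable() is
# called, so stale answers are never served after reconfiguration.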
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
def test_emit_after_closing_in_write_mode(self):
# Issue #42378
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', mode='w')
fh.setFormatter(logging.Formatter('%(message)s'))
fh.emit(self.next_rec()) # '1'
fh.close()
fh.emit(self.next_rec()) # '2'
with open(self.fn) as fp:
self.assertEqual(fp.read().strip(), '1')
class RotatingFileHandlerTest(BaseFileTest):
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
# bpo-45401 - test with special file
# We set maxBytes to 1 so that rollover would normally happen, except
# for the check for regular files
rh = logging.handlers.RotatingFileHandler(
os.devnull, encoding="utf-8", maxBytes=1)
self.assertFalse(rh.shouldRollover(self.next_rec()))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8", maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.replace(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
def test_should_not_rollover(self):
# See bpo-45401. Should only ever rollover regular files
fh = logging.handlers.TimedRotatingFileHandler(
os.devnull, 'S', encoding="utf-8", backupCount=1)
time.sleep(1.1) # a little over a second ...
r = logging.makeLogRecord({'msg': 'testing - device file'})
self.assertFalse(fh.shouldRollover(r))
fh.close()
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(
self.fn, 'S', encoding="utf-8", backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', encoding="utf-8", delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='MIDNIGHT', interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
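    # Worked example for the assertions above (utc=True, so no DST involved):
    # currentTime=0 is midnight, and atTime=12:00 puts the first rollover at
    # 12 * 60 * 60 seconds. Asking again at 13:00 (13 * 60 * 60) is past
    # today's rollover point, so the next one is tomorrow at 12:00,
    # i.e. 36 * 60 * 60 seconds after the epoch.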
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='W%d' % day, interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def test_compute_files_to_delete(self):
# See bpo-46063 for background
wd = tempfile.mkdtemp(prefix='test_logging_')
self.addCleanup(shutil.rmtree, wd)
times = []
dt = datetime.datetime.now()
for i in range(10):
times.append(dt.strftime('%Y-%m-%d_%H-%M-%S'))
dt += datetime.timedelta(seconds=5)
prefixes = ('a.b', 'a.b.c', 'd.e', 'd.e.f')
files = []
rotators = []
for prefix in prefixes:
p = os.path.join(wd, '%s.log' % prefix)
rotator = logging.handlers.TimedRotatingFileHandler(p, when='s',
interval=5,
backupCount=7,
delay=True)
rotators.append(rotator)
if prefix.startswith('a.b'):
for t in times:
files.append('%s.log.%s' % (prefix, t))
else:
rotator.namer = lambda name: name.replace('.log', '') + '.log'
for t in times:
files.append('%s.%s.log' % (prefix, t))
# Create empty files
for fn in files:
p = os.path.join(wd, fn)
with open(p, 'wb') as f:
pass
# Now the checks that only the correct files are offered up for deletion
for i, prefix in enumerate(prefixes):
rotator = rotators[i]
candidates = rotator.getFilesToDelete()
self.assertEqual(len(candidates), 3)
if prefix.startswith('a.b'):
p = '%s.log.' % prefix
for c in candidates:
d, fn = os.path.split(c)
self.assertTrue(fn.startswith(p))
else:
for c in candidates:
d, fn = os.path.split(c)
self.assertTrue(fn.endswith('.log'))
self.assertTrue(fn.startswith(prefix + '.') and
fn[len(prefix) + 2].isdigit())
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
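# The loop above generates one test per rollover interval at import time:
# `when` and `exp` are bound as default argument values so each generated
# function keeps its own pair, and setattr() attaches it to
# TimedRotatingFileHandlerTest as test_compute_rollover_S, _M, _H, _D,
# _MIDNIGHT and _W0, so unittest discovers them like hand-written methods.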
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
not_exported = {
'logThreads', 'logMultiprocessing', 'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger', 'root',
'threading'}
support.check__all__(self, logging, not_exported=not_exported)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
def setUpModule():
cm = support.run_with_locale('LC_ALL', '')
cm.__enter__()
unittest.addModuleCleanup(cm.__exit__, None, None, None)
if __name__ == "__main__":
unittest.main()
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except Exception:  # image has no EXIF data, or it is malformed
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, worker_init_fn=None):  # DataLoader expects a callable or None
    # Make sure only the first process in DDP processes the dataset first, so the other processes can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn,
worker_init_fn=worker_init_fn)
return dataloader, dataset
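# Notes on the choices above: the worker count is capped at
# cpu_count() // world_size so that several DDP processes on one machine do
# not oversubscribe the CPU, a DistributedSampler is attached for every rank
# other than -1 so each process sees a distinct shard of the dataset, and the
# InfiniteDataLoader defined below keeps its workers alive across epochs
# instead of respawning them.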
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', self._RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
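# Minimal sketch (not part of the original file) of how InfiniteDataLoader
# behaves: each for-loop over it still yields one epoch's worth of batches,
# but the underlying iterator and its workers persist between epochs because
# _RepeatSampler never stops. The tensor contents below are arbitrary.
def _demo_infinite_dataloader():
    from torch.utils.data import TensorDataset
    ds = TensorDataset(torch.arange(8, dtype=torch.float32).unsqueeze(1))
    loader = InfiniteDataLoader(ds, batch_size=4, num_workers=0)
    for epoch in range(3):          # three epochs, one persistent iterator
        for (batch,) in loader:     # 2 batches of 4 samples per epoch
            assert batch.shape == (4, 1)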
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
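# Minimal usage sketch (not part of the original file): LoadImages is an
# iterator meant for inference scripts. The source path below is hypothetical.
def _demo_load_images(source='inference/images'):
    dataset = LoadImages(source, img_size=640)
    for path, img, img0, vid_cap in dataset:
        # img: letterboxed, RGB, CHW uint8 array ready for the model
        # img0: the original BGR frame as read by OpenCV
        # vid_cap: the cv2.VideoCapture object for videos, None for images
        print(path, img.shape, img0.shape)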
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(int(s) if s.isnumeric() else s)  # avoid eval() on untrusted source strings
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int is removed in newer NumPy)
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
                assert l.shape[1] == 5, 'labels require exactly 5 columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = [None, None]
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
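# Illustrative sketch (not part of the original file): because each image can carry a
# different number of labels, the dataset is meant to be wrapped with the collate_fn
# above; the DataLoader settings below are hypothetical.
#   loader = torch.utils.data.DataLoader(dataset, batch_size=16, num_workers=4,
#                                        collate_fn=dataset.collate_fn)
#   imgs, targets, paths, shapes = next(iter(loader))
#   # imgs: (16, 3, H, W) uint8 tensor; targets: (n, 6) with column 0 = image index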
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
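# Illustrative note (not part of the original file): with img_size s = 640 the mosaic
# canvas above is 1280x1280 (filled with the gray value 114). Assuming the usual
# mosaic_border of (-s // 2, -s // 2) = (-320, -320), random_perspective() then crops
# the canvas back to height = 1280 + 2 * (-320) = 640, i.e. a 640x640 training image.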
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
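# Illustrative note (not part of the original file): letterbox arithmetic for a
# hypothetical 720x1280 (h, w) frame with new_shape=640 and auto=True:
#   r = min(640/720, 640/1280) = 0.5      -> new_unpad = (640, 360)
#   dh = 640 - 360 = 280 -> 280 % 64 = 24 -> 12.0 px of padding on top and bottom
# so the call returns a 384x640 image, ratio == (0.5, 0.5) and pad == (0.0, 12.0).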
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
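# Illustrative note (not part of the original file): with the defaults wh_thr=2,
# ar_thr=20, area_thr=0.1, a 100x50 px box that shrinks to 60x30 px after augmentation
# is kept (both sides > 2 px, area ratio 0.36 > 0.1, aspect ratio 2 < 20), while a box
# squashed to 60x1 px is dropped because its height is not greater than wh_thr.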
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
opening.py
|
#!/usr/bin/python
import paho.mqtt.client as paho
import time
import pyupm_grove as grove
from threading import Thread
button = grove.GroveButton(8)
def functionDataSensor():
return button.value()
def functionDataSensorMqttPublish():
mqttclient = paho.Client()
mqttclient.connect("iot.eclipse.org", 1883, 60)
while True:
data = functionDataSensor()
topic = "edzna/principal/opening"
mqttclient.publish(topic, data)
time.sleep(1)
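# Illustrative sketch (not part of the original script): a companion subscriber for the
# same topic, assuming the same public broker. Run it from another process to verify
# what functionDataSensorMqttPublish() sends.
def functionDataSensorMqttSubscribe():
    def on_message(client, userdata, msg):
        print(msg.topic + " -> " + str(msg.payload))
    mqttclient = paho.Client()
    mqttclient.on_message = on_message
    mqttclient.connect("iot.eclipse.org", 1883, 60)
    mqttclient.subscribe("edzna/principal/opening")
    mqttclient.loop_forever()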
if __name__ == '__main__':
threadmqttpublish = Thread(target=functionDataSensorMqttPublish)
threadmqttpublish.start()
print "Hello Edzna @ Opening"
while True:
time.sleep(5)
# End of File
|
voicemail.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# messenger.py
#
# Copyright 2018 <pi@rhombus1>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
import os
import threading
from callattendant.hardware.indicators import (MessageIndicator,
MessageCountIndicator, GPIO_MESSAGE, GPIO_MESSAGE_COUNT_PINS,
GPIO_MESSAGE_COUNT_KWARGS)
from callattendant.messaging.message import Message
class VoiceMail:
def __init__(self, db, config, modem):
"""
Initialize the database tables for voice messages.
"""
if config["DEBUG"]:
print("Initializing VoiceMail")
self.db = db
self.config = config
self.modem = modem
# Create a message event shared with the Message class used to monitor changes
self.message_event = threading.Event()
self.config["MESSAGE_EVENT"] = self.message_event
# Initialize the message indicators (LEDs)
self.message_indicator = MessageIndicator(
self.config.get("GPIO_LED_MESSAGE_PIN", GPIO_MESSAGE),
self.config.get("GPIO_LED_MESSAGE_BRIGHTNESS", 100))
pins = self.config.get("GPIO_LED_MESSAGE_COUNT_PINS", GPIO_MESSAGE_COUNT_PINS)
kwargs = self.config.get("GPIO_LED_MESSAGE_COUNT_KWARGS", GPIO_MESSAGE_COUNT_KWARGS)
self.message_count_indicator = MessageCountIndicator(*pins, **kwargs)
# Create the Message object used to interface with the DB
self.messages = Message(db, config)
# Start the thread that monitors the message events and updates the indicators
self._stop_event = threading.Event()
self._thread = threading.Thread(target=self._event_handler)
self._thread.name = "voice_mail_event_handler"
self._thread.start()
# Pulse the indicator if an unplayed msg is waiting
self.reset_message_indicator()
if self.config["DEBUG"]:
print("VoiceMail initialized")
def stop(self):
"""
Stops the voice mail thread and releases hardware resources.
"""
self._stop_event.set()
self._thread.join()
self.message_indicator.close()
self.message_count_indicator.close()
def _event_handler(self):
"""
Thread function that updates the message indicators upon a message event.
"""
while not self._stop_event.is_set():
# Get the number of unread messages
if self.message_event.wait(2.0):
if self.config["DEBUG"]:
print("Message Event triggered")
self.reset_message_indicator()
def voice_messaging_menu(self, call_no, caller):
"""
Play a voice message menu and respond to the choices.
"""
# Build some common paths
voice_mail = self.config.get_namespace("VOICE_MAIL_")
voice_mail_menu_file = voice_mail['menu_file']
invalid_response_file = voice_mail['invalid_response_file']
goodbye_file = voice_mail['goodbye_file']
# Indicate the user is in the menu
self.message_indicator.blink()
tries = 0
wait_secs = 8 # Candidate for configuration
rec_msg = False
while tries < 3:
self.modem.play_audio(voice_mail_menu_file)
success, digit = self.modem.wait_for_keypress(wait_secs)
if not success:
break
if digit == '1':
self.record_message(call_no, caller)
rec_msg = True # prevent a duplicate reset_message_indicator
break
elif digit == '0':
# End this call
break
else:
# Try again--up to a limit
self.modem.play_audio(invalid_response_file)
tries += 1
self.modem.play_audio(goodbye_file)
if not rec_msg:
self.reset_message_indicator()
def record_message(self, call_no, caller, detect_silence=True):
"""
Records a message.
"""
# Build the filename used for a potential message
path = self.config["VOICE_MAIL_MESSAGE_FOLDER"]
filepath = os.path.join(path, "{}_{}_{}_{}.wav".format(
call_no,
caller["NMBR"],
caller["NAME"].replace('_', '-'),
datetime.now().strftime("%m%d%y_%H%M")))
# Play instructions to caller
leave_msg_file = self.config["VOICE_MAIL_LEAVE_MESSAGE_FILE"]
self.modem.play_audio(leave_msg_file)
# Show recording in progress
self.message_indicator.turn_on()
if self.modem.record_audio(filepath, detect_silence):
# Save to Message table (message.add will update the indicator)
msg_no = self.messages.add(call_no, filepath)
# Return the messageID on success
return msg_no
else:
self.reset_message_indicator()
# Return failure
return None
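    # Illustrative note (not part of the original class): with call_no=42, a caller of
    # NMBR '8005551212' and NAME 'JOHN_DOE', the filepath built above would look like
    # <VOICE_MAIL_MESSAGE_FOLDER>/42_8005551212_JOHN-DOE_061525_1430.wav (date/time
    # values hypothetical).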
def delete_message(self, msg_no):
"""
Removes the message record and associated wav file.
"""
# Remove message and file (message.delete will update the indicator)
return self.messages.delete(msg_no)
def reset_message_indicator(self):
unplayed_count = self.messages.get_unplayed_count()
if self.config["DEBUG"]:
print("Resetting Message Indicator to show {} unplayed messages".format(unplayed_count))
if unplayed_count > 0:
self.message_indicator.pulse()
if unplayed_count < 10:
self.message_count_indicator.display(unplayed_count)
self.message_count_indicator.decimal_point = False
else:
self.message_count_indicator.display(9)
self.message_count_indicator.decimal_point = True
else:
self.message_indicator.turn_off()
self.message_count_indicator.display(' ')
self.message_count_indicator.decimal_point = False
|
bigiq_regkey_pool_cleaner_neutron_port.py
|
#!/usr/bin/env python
# coding=utf-8
# pylint: disable=broad-except,unused-argument,line-too-long, unused-variable
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the process to scan BIG-IQ for regkey license
allocations and matches those to Neutron ports by MAC address. It
revokes any license allocations for IP addresses and MAC addresses
which are not present in OpenStack.
"""
import argparse
import logging
import os
import sys
import time
import datetime
import threading
from urlparse import urlparse
import requests
CONNECT_TIMEOUT = 30
LOG = logging.getLogger('bigiq_regkey_pool_cleaner')
LOG.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGSTREAM = logging.StreamHandler(sys.stdout)
LOGSTREAM.setFormatter(FORMATTER)
LOG.addHandler(LOGSTREAM)
def _get_bigiq_session(ctx, reuse=True):
''' Creates a Requests Session to the BIG-IQ host configured '''
if reuse and hasattr(ctx, 'bigiq') and ctx.bigiq:
LOG.debug('reusing BIG-IQ session')
return ctx.bigiq
if requests.__version__ < '2.9.1':
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
bigiq = requests.Session()
bigiq.ctx = ctx
bigiq.verify = False
bigiq.headers.update({'Content-Type': 'application/json'})
bigiq.timeout = CONNECT_TIMEOUT
token_auth_body = {'username': ctx.bigiqusername,
'password': ctx.bigiqpassword,
'loginProviderName': 'local'}
login_url = "https://%s/mgmt/shared/authn/login" % (ctx.bigiqhost)
response = bigiq.post(login_url,
json=token_auth_body,
verify=False,
auth=requests.auth.HTTPBasicAuth(
ctx.bigiqusername, ctx.bigiqpassword))
response_json = response.json()
bigiq.headers.update(
{'X-F5-Auth-Token': response_json['token']['token']})
bigiq.base_url = 'https://%s/mgmt/cm/device/licensing/pool' % ctx.bigiqhost
LOG.debug('initiated new BIG-IQ session')
ctx.bigiq = bigiq
return bigiq
def _get_openstack_session(ctx, reuse=True):
if reuse and hasattr(ctx, 'openstack') and ctx.openstack:
LOG.debug('reusing OpenStack session')
return ctx.openstack
if requests.__version__ < '2.9.1':
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
openstack = requests.Session()
openstack.ctx = ctx
openstack.verify = False
openstack.headers.update({'Content-Type': 'application/json'})
openstack.timeout = CONNECT_TIMEOUT
token_auth_body = {
'auth': {
'scope': {
'project': {
'domain': {
'name': ctx.os_project_domain_name
},
'name': ctx.os_project_name
}
},
'identity': {
'methods': ['password'],
'password': {
'user': {
'domain': {
'name': ctx.os_user_domain_name
},
'password': ctx.os_password,
'name': ctx.os_username
}
}
}
}
}
response = openstack.post('%s/auth/tokens' % ctx.os_auth_url,
json=token_auth_body,
verify=False)
response_json = response.json()
openstack.headers.update(
{'X-Auth-Token': response.headers['X-Subject-Token']})
catalogitems = response_json['token']['catalog']
for catalogitem in catalogitems:
if catalogitem['type'] == 'network':
for endpoint in catalogitem['endpoints']:
if endpoint['interface'] == ctx.os_interface:
openstack.base_url = endpoint['url']
ctx.openstack = openstack
return openstack
def _get_pool_id(ctx):
''' Get a BIG-IQ license pool by its pool name. Returns first
match of the specific pool type.
:param: bigiq_session: BIG-IQ session object
:param: pool_name: BIG-IQ pool name
:returns: Pool ID string
'''
LOG.debug('finding pool %s', ctx.licensepool)
bigiq_session = _get_bigiq_session(ctx, reuse=True)
pools_url = '%s/regkey/licenses?$select=id,kind,name' % \
bigiq_session.base_url
# Now need to check both name and uuid for match. Can't filter.
# query_filter = '&$filter=name%20eq%20%27'+pool_name+'%27'
# pools_url = "%s%s" % (pools_url, query_filter)
response = bigiq_session.get(pools_url)
response.raise_for_status()
response_json = response.json()
pools = response_json['items']
for pool in pools:
if pool['name'] == ctx.licensepool or pool['id'] == ctx.licensepool:
if str(pool['kind']).find('pool:regkey') > 1:
return pool['id']
return None
def _get_active_members(ctx):
''' Get regkey, member_id tuple by management IP address
:param: ctx:: application context
:returns: list of regkey pool members with active keys
'''
LOG.debug(
'querying pools %s: %s for active licenses', ctx.licensepool, ctx.bigiq_pool_id)
bigiq_session = _get_bigiq_session(ctx)
pools_url = '%s/regkey/licenses' % bigiq_session.base_url
offerings_url = '%s/%s/offerings' % (pools_url, ctx.bigiq_pool_id)
response = bigiq_session.get(offerings_url)
response.raise_for_status()
response_json = response.json()
offerings = response_json['items']
return_members = []
for offering in offerings:
members_url = '%s/%s/members' % (
offerings_url, offering['regKey'])
response = bigiq_session.get(members_url)
response.raise_for_status()
response_json = response.json()
members = response_json['items']
for member in members:
return_members.append(member)
return return_members
def _get_members_to_revoke(ctx, license_pool_members):
if not license_pool_members:
return []
LOG.debug(
'querying network ports for %d active license members', len(license_pool_members))
openstack_session = _get_openstack_session(ctx)
ports_url = '%s/v2.0/ports' % openstack_session.base_url
members_to_revoke = []
for member in license_pool_members:
query_filter = '?mac_address=%s' % member['macAddress'].lower()
search_url = '%s%s' % (ports_url, query_filter)
response = openstack_session.get(search_url)
response.raise_for_status()
response_json = response.json()
if not response_json['ports']:
members_to_revoke.append(member)
else:
for port in response_json['ports']:
if port['status'] != 'ACTIVE':
members_to_revoke.append(member)
return members_to_revoke
def _report(license_members, members_to_revoke):
if not license_members:
return []
return_records = []
now = datetime.datetime.utcnow()
fmt_ts = now.strftime('%Y-%m-%dT%H:%M:%S') + \
('.%03dZ' % (now.microsecond / 10000))
for member in license_members:
preserve_member = True
for revoke in members_to_revoke:
if member['id'] == revoke['id']:
preserve_member = False
return_records.append(
"OFF,%s,%s,%s" % (
fmt_ts,
member['macAddress'],
member['deviceAddress']
)
)
if preserve_member:
return_records.append(
"ON,%s,%s,%s" % (
fmt_ts,
member['macAddress'],
member['deviceAddress']
)
)
return return_records
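# Illustrative note (not part of the original module): _report() emits one CSV-style
# record per active member, e.g. (values hypothetical):
#   ON,2018-06-15T14:30:00.012Z,FA:16:3E:01:02:03,192.0.2.10
#   OFF,2018-06-15T14:30:00.012Z,FA:16:3E:04:05:06,192.0.2.11
# where OFF marks a member queued for revocation because no ACTIVE Neutron port owns
# its MAC address.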
def _revoke(ctx, member):
bigiq_session = _get_bigiq_session(ctx, reuse=False)
session_urlp = urlparse(bigiq_session.base_url)
member_urlp = urlparse(member['selfLink'])
member_url = '%s://%s%s' % (
member_urlp.scheme, session_urlp.netloc, member_urlp.path)
delete_body = {'id': member['id'],
'username': 'admin',
'password': 'revoke'}
LOG.debug('revoking license for member %s : %s',
member['id'], member['macAddress'])
response = bigiq_session.delete(member_url,
json=delete_body,
verify=False)
if response.status_code > 399:
LOG.error(
'could not revoke license for member: %s - %s', member['id'], response.text)
def reconcile(ctx, license_members, members_to_revoke):
''' print out a report for all active license members and revoke missing ports '''
if not license_members:
return
reports = _report(license_members, members_to_revoke)
if ctx.report_file:
with open(ctx.report_file, 'a+') as report_file:
for report in reports:
report_file.write(report + '\n')
else:
for report in reports:
LOG.info('report record: %s', report)
for revoke in members_to_revoke:
try:
thread = threading.Thread(target=_revoke, args=(ctx, revoke))
thread.start()
except Exception as ex:
LOG.error("error revoking member %s - %s", revoke['id'], ex)
def main(ctx):
''' main entry point '''
log_level_dict = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARN': logging.WARN,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
'FATAL': logging.FATAL
}
LOG.setLevel(log_level_dict[ctx.log_level])
if ctx.daemon:
LOG.debug('Running in daemon mode, polling every %d seconds',
ctx.poll_cycle)
while True:
# Get a new session every pool cycle
_get_bigiq_session(ctx, reuse=True)
# resolve the Pool ID from pool name
try:
ctx.bigiq_pool_id = _get_pool_id(ctx)
except KeyboardInterrupt:
                LOG.info('Exiting..')
sys.exit(1)
except Exception as ex:
if 'Unauthorized' in str(ex):
                    LOG.error('BIG-IQ session expired')
ctx.bigiq = None
ctx.bigiq_pool_id = _get_pool_id(ctx)
else:
LOG.error("Pool %s not found - %s", ctx.licensepool, ex)
time.sleep(ctx.poll_cycle)
continue
try:
LOG.debug('Polling licenses in %s pool', ctx.licensepool)
# find active licenses
license_pool_members = _get_active_members(ctx)
# find active licenses which do not have Neutron ports for their MAC address
revoke_members = _get_members_to_revoke(
ctx, license_pool_members)
# report and revoke
reconcile(ctx, license_pool_members, revoke_members)
time.sleep(ctx.poll_cycle)
except KeyboardInterrupt:
                LOG.info('Exiting..')
sys.exit(1)
except Exception as ex:
if 'Unauthorized' in str(ex):
                    LOG.error('BIG-IQ session expired')
else:
LOG.error("Error reconciling licenses %s", ex)
ctx.bigiq = None
ctx.openstack = None
time.sleep(ctx.poll_cycle)
else:
# resolve the Pool ID from pool name
try:
ctx.bigiq_pool_id = _get_pool_id(ctx)
except KeyboardInterrupt:
            LOG.info('Exiting..')
sys.exit(1)
except Exception as ex:
LOG.error("Pool %s not found - %s", ctx.licensepool, ex)
return False
try:
# find active licenses
LOG.debug('Polling licenses in %s pool', ctx.licensepool)
license_pool_members = _get_active_members(ctx)
# find active licenses which do not have Neutron ports for their MAC address
revoke_members = _get_members_to_revoke(ctx, license_pool_members)
# report and revoke
reconcile(ctx, license_pool_members, revoke_members)
except Exception as ex:
LOG.error("Error reconciling licenses %s", ex)
return False
return True
if __name__ == "__main__":
ARGPARSE = argparse.ArgumentParser()
ARGPARSE.add_argument('-l', '--log-level', help='set logging level',
choices=['DEBUG', 'INFO', 'WARN',
'ERROR', 'CRITICAL', 'FATAL'],
default=os.getenv('LOGLEVEL', 'INFO'))
ARGPARSE.add_argument(
        '-d', '--daemon', help='Run in daemon mode', action='store_true')
ARGPARSE.add_argument('-p', '--poll-cycle', help='How often to report and revoke, default 5 minutes',
default=os.getenv('LICENSEPOOLINTERVAL', 300), type=int)
ARGPARSE.add_argument('-r', '--report-file',
help='the report log file', default=os.getenv('LICENSEREPORTFILE', None))
ARGPARSE.add_argument('--bigiqhost', help='BIG-IQ hostname or IP address',
default=os.getenv('BIGIQHOST', '192.168.245.1'))
ARGPARSE.add_argument('--bigiqusername', help='BIG-IQ username',
default=os.getenv('BIGIQUSERNAME', 'admin'))
ARGPARSE.add_argument('--bigiqpassword', help='BIG-IQ password',
default=os.getenv('BIGIQPASSWORD', 'admin'))
ARGPARSE.add_argument('--licensepool', help='BIG-IQ license pool name',
default=os.getenv('LICENSEPOOL'))
ARGPARSE.add_argument('--os-project-name', help='OpenStack project name',
default=os.getenv('OS_PROJECT_NAME', 'admin'))
ARGPARSE.add_argument('--os-project-domain-name', help='OpenStack project domain name',
default=os.getenv('OS_PROJECT_DOMAIN_NAME', 'default'))
ARGPARSE.add_argument('--os-username', help='OpenStack user name',
default=os.getenv('OS_USERNAME', 'admin'))
ARGPARSE.add_argument('--os-user-domain-name', help='OpenStack user domain name',
default=os.getenv('OS_USER_DOMAIN_NAME', 'default'))
ARGPARSE.add_argument('--os-auth-url', help='OpenStack Keystone Auth URL',
default=os.getenv('OS_AUTH_URL', 'https://localhost:35357/v3'))
ARGPARSE.add_argument('--os-password', help='OpenStack password',
default=os.getenv('OS_PASSWORD', 'openstack'))
ARGPARSE.add_argument('--os-interface', help='OpenStack authentication interface',
default=os.getenv('OS_INTERFACE', 'internal'))
if main(ARGPARSE.parse_args()):
sys.exit(0)
else:
sys.exit(1)
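# Illustrative usage sketch (not part of the original module); host, pool, file and URL
# values are hypothetical:
#   python bigiq_regkey_pool_cleaner_neutron_port.py -d -p 300 \
#       --bigiqhost 192.168.245.5 --licensepool my-regkey-pool \
#       --os-auth-url https://keystone.example.com:5000/v3 -r /var/log/regkey_report.csv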
|
tumblr.py
|
#!/usr/bin/env python2
# vim: set fileencoding=utf8
import os
import sys
import re
import json
import requests
import argparse
import random
import multiprocessing
import time
api_key = 'fuiKNFp9vQFvjLNvx4sUwti4Yb5yGutBN4Xh10LXZhhRKjWlV4'
############################################################
# wget exit status
wget_es = {
0: "No problems occurred.",
2: "User interference.",
1<<8: "Generic error code.",
2<<8: "Parse error - for instance, when parsing command-line " \
"optio.wgetrc or .netrc...",
3<<8: "File I/O error.",
4<<8: "Network failure.",
5<<8: "SSL verification failure.",
6<<8: "Username/password authentication failure.",
7<<8: "Protocol errors.",
8<<8: "Server issued an error response."
}
############################################################
s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template
headers = {
"Accept":"text/html,application/xhtml+xml,application/xml; " \
"q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding":"text/html",
"Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2",
"Content-Type":"application/x-www-form-urlencoded",
"Referer":"https://api.tumblr.com/console//calls/blog/posts",
"User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 "\
"(KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
}
ss = requests.session()
ss.headers.update(headers)
class tumblr(object):
def save_json(self):
with open(self.json_path, 'w') as g:
g.write(json.dumps({'key': self.key}, indent=4, sort_keys=True))
def get_site_infos(self, postid=None):
self.infos['photos'] = []
self.url = 'http://api.tumblr.com/v2/blog/%s/posts/photo' % self.infos['host']
params = {
"offset": self.key if not postid else "",
"limit": 20 if not postid else "",
"type": "photo",
"filter": "text",
"tag": args.tag,
"id": postid if postid else "",
"api_key": api_key
}
r = None
while True:
try:
r = ss.get(self.url, params=params)
break
except Exception as e:
print s % (1, 91, ' !! Error at get_infos'), e
time.sleep(5)
if r.ok:
j = r.json()
if j['response']['posts']:
for i in j['response']['posts']:
index = 1
for ii in i['photos']:
durl = ii['original_size']['url'].encode('utf8')
filepath = os.path.join(self.infos['dir_'], '%s_%s.%s' \
% (i['id'], index, durl.split('.')[-1]))
filename = os.path.split(filepath)[-1]
t = {
'filepath': filepath,
'durl': durl,
'filename': filename
}
index += 1
self.infos['photos'].append(t)
else:
print s % (1, 92, '\n --- job over ---')
sys.exit(0)
else:
print s % (1, 91, '\n !! Error, get_infos')
print r.status_code, r.content
sys.exit(1)
def get_tag_infos(self):
self.infos['photos'] = []
self.url = 'http://api.tumblr.com/v2/tagged'
params = {
"limit": 20,
"type": "photo",
"tag": self.infos['tag'],
"before": self.key,
"api_key": api_key
}
r = None
while True:
try:
r = ss.get(self.url, params=params)
break
except Exception as e:
print s % (1, 91, ' !! Error at get_infos'), e
time.sleep(5)
if r.ok:
j = r.json()
if j['response']:
for i in j['response']:
index = 1
if i.get('photos'):
for ii in i['photos']:
durl = ii['original_size']['url'].encode('utf8')
filepath = os.path.join(self.infos['dir_'], '%s_%s.%s' \
% (i['id'], index, durl.split('.')[-1]))
filename = os.path.split(filepath)[-1]
t = {
'filepath': filepath,
'durl': durl,
'filename': filename,
'key': i['timestamp']
}
index += 1
self.infos['photos'].append(t)
else:
print s % (1, 92, '\n --- job over ---')
sys.exit(0)
else:
print s % (1, 91, '\n !! Error, get_infos')
print r.status_code, r.content
sys.exit(1)
def download(self):
def run(i):
#if not os.path.exists(i['filepath']):
num = random.randint(0, 7) % 7
col = s % (1, num + 90, i['filepath'])
print '\n ++ download: %s' % col
cmd = 'wget -c -T 4 -q -O "%s.tmp" ' \
'--header "Referer:http://www.tumblr.com" ' \
'--user-agent "%s" "%s"' \
% (i['filepath'], headers['User-Agent'], i['durl'])
#cmd = 'wget -c -T 4 -q --user-agent "%s" -O "%s.tmp" "%s"' \
#% (headers['User-Agent'], i['filepath'], i['durl'])
status = os.system(cmd)
if status != 0: # other http-errors, such as 302.
wget_exit_status_info = wget_es[status]
print('\n\n ----### \x1b[1;91mERROR\x1b[0m ==> '\
'\x1b[1;91m%d (%s)\x1b[0m ###--- \n\n' \
% (status, wget_exit_status_info))
print s % (1, 91, ' ===> '), cmd
sys.exit(1)
else:
os.rename('%s.tmp' % i['filepath'], i['filepath'])
l = [self.infos['photos'][i:i+self.processes] \
for i in range(len(self.infos['photos']))[::self.processes]]
for yy in l:
ppool = []
for ii in yy:
if not os.path.exists(ii['filepath']):
p = multiprocessing.Process(target=run, args=(ii,))
p.start()
print p
ppool.append(p)
for p in ppool: p.join()
def download_site(self, url):
self.infos = {'host': re.search(r'http(s|)://(.+?)($|/)', url).group(2)}
self.infos['dir_'] = os.path.join(os.getcwd(), self.infos['host'])
self.processes = int(args.processes)
if not os.path.exists(self.infos['dir_']):
os.makedirs(self.infos['dir_'])
self.json_path = os.path.join(self.infos['dir_'], 'json.json')
self.key = 0
print s % (1, 92, '\n ## begin'), 'key = %s' % self.key
else:
self.json_path = os.path.join(self.infos['dir_'], 'json.json')
if os.path.exists(self.json_path):
self.key = json.loads(open(self.json_path).read())['key'] - 20
print s % (1, 92, '\n ## begin'), 'key = %s' % self.key
else:
self.key = 0
if args.check:
t = os.listdir(self.infos['dir_'])
t = [i[:i.find('_')] for i in t if i.endswith('.tmp')]
ltmp = list(set(t))
for postid in ltmp:
self.get_site_infos(postid)
self.download()
else:
while True:
self.get_site_infos()
self.key += 20
self.save_json()
self.download()
def download_tag(self, tag):
self.infos = {'tag': tag}
self.infos['dir_'] = os.path.join(os.getcwd(), 'tumblr-%s' % self.infos['tag'])
self.processes = int(args.processes)
if not os.path.exists(self.infos['dir_']):
os.makedirs(self.infos['dir_'])
self.json_path = os.path.join(self.infos['dir_'], 'json.json')
self.key = int(time.time())
print s % (1, 92, '\n ## begin'), 'key = %s' % self.key
else:
self.json_path = os.path.join(self.infos['dir_'], 'json.json')
if os.path.exists(self.json_path):
self.key = json.loads(open(self.json_path).read())['key']
print s % (1, 92, '\n ## begin'), 'key = %s' % self.key
else:
self.key = int(time.time())
if args.check:
t = os.listdir(self.infos['dir_'])
t = [i[:i.find('_')] for i in t if i.endswith('.tmp')]
ltmp = list(set(t))
for postid in ltmp:
self.get_site_infos(postid)
self.download()
else:
while True:
self.get_tag_infos()
self.key = self.infos['photos'][-1]['key']
self.save_json()
self.download()
def main(argv):
p = argparse.ArgumentParser(description='download from tumblr.com')
p.add_argument('xxx', help='xxx')
p.add_argument('-p', '--processes', action='store', default=5, \
        help='number of worker processes, default 5, max 20, e.g.: -p 20')
p.add_argument('-c', '--check', action='store_true', \
        help='try to repair images that were not downloaded successfully')
p.add_argument('-t', '--tag', action='store', \
        default=None, type=str, help='download images with a specific tag, e.g.: -t beautiful')
global args
args = p.parse_args(argv[1:])
xxx = args.xxx
if 'http:' in xxx:
x = tumblr()
x.download_site(xxx)
else:
x = tumblr()
x.download_tag(xxx)
if __name__ == '__main__':
argv = sys.argv
main(argv)
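# Illustrative usage sketch (not part of the original script); blog URL and tag are
# hypothetical:
#   python2 tumblr.py http://example.tumblr.com -p 10   # download a blog's photo posts
#   python2 tumblr.py landscape                         # download posts tagged 'landscape'
#   python2 tumblr.py http://example.tumblr.com -c      # retry unfinished *.tmp downloads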
|
tunnel.py
|
"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 IPython Development Team, Min Ragan-Kelley
#
# Redistributed from IPython under the terms of the BSD License.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os,sys,atexit
import socket
from getpass import getpass, getuser
try:
import paramiko
except ImportError:
paramiko = None
else:
from zmq.ssh.forward import forward_tunnel
try:
import pexpect
except ImportError:
pexpect = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_random_ports = set()
def select_random_ports(n):
"""Selects and return n random ports that are available."""
ports = []
for i in range(n):
sock = socket.socket()
sock.bind(('', 0))
while sock.getsockname()[1] in _random_ports:
sock.close()
sock = socket.socket()
sock.bind(('', 0))
ports.append(sock)
for i, sock in enumerate(ports):
port = sock.getsockname()[1]
sock.close()
ports[i] = port
_random_ports.add(port)
return ports
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
def try_passwordless_ssh(server, keyfile, paramiko=None):
"""Attempt to make an ssh connection without a password.
This is mainly used for requiring password input only once
when many tunnels may be connected to the same server.
If paramiko is None, the default for the platform is chosen.
"""
if paramiko is None:
paramiko = sys.platform == 'win32'
if not paramiko:
f = _try_passwordless_openssh
else:
f = _try_passwordless_paramiko
return f(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
"""Try passwordless login with shell ssh command."""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko")
cmd = 'ssh -f '+ server
if keyfile:
cmd += ' -i ' + keyfile
cmd += ' exit'
p = pexpect.spawn(cmd)
while True:
try:
p.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
return True
else:
return False
def _try_passwordless_paramiko(server, keyfile):
"""Try passwordless login with paramiko."""
if paramiko is None:
msg = "Paramiko unavaliable, "
if sys.platform == 'win32':
msg += "Paramiko is required for ssh tunneled connections on Windows."
else:
msg += "use OpenSSH."
raise ImportError(msg)
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True)
except paramiko.AuthenticationException:
return False
else:
client.close()
return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Connect a socket to an address via an ssh tunnel.
This is a wrapper for socket.connect(addr), when addr is not accessible
from the local machine. It simply creates an ssh tunnel using the remaining args,
and calls socket.connect('tcp://localhost:lport') where lport is the randomly
selected local port of the tunnel.
"""
new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
socket.connect(new_url)
return tunnel
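# Illustrative sketch (not part of the original module): typical use of
# tunnel_connection() with a pyzmq socket; the remote address and ssh server below are
# hypothetical placeholders.
def _example_tunnel_connection():
    import zmq
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REQ)
    # forwards a random local port to 10.0.0.5:5555 as seen from the ssh gateway
    tunnel_connection(sock, 'tcp://10.0.0.5:5555', 'user@gateway.example.com')
    sock.send(b'ping')
    return sock.recv()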
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Open a tunneled connection from a 0MQ url.
For use inside tunnel_connection.
Returns
-------
(url, tunnel) : (str, object)
The 0MQ url that has been forwarded, and the tunnel object
"""
lport = select_random_ports(1)[0]
transport, addr = addr.split('://')
ip,rport = addr.split(':')
rport = int(rport)
if paramiko is None:
paramiko = sys.platform == 'win32'
if paramiko:
tunnelf = paramiko_tunnel
else:
tunnelf = openssh_tunnel
tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
return 'tcp://127.0.0.1:%i'%lport, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""Create an ssh tunnel using command-line ssh that connects port lport
on this machine to localhost:rport on server. The tunnel
will automatically close when not in use, remaining open
for a minimum of timeout seconds for an initial connection.
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko_tunnel")
ssh="ssh "
if keyfile:
ssh += "-i " + keyfile
username, server, port = _split_server(server)
server = username + "@" + server
cmd = ssh + " -f -p %i -L 127.0.0.1:%i:%s:%i %s sleep %i"%(port, lport, remoteip, rport, server, timeout)
tunnel = pexpect.spawn(cmd)
failed = False
while True:
try:
tunnel.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
if tunnel.exitstatus:
print(tunnel.exitstatus)
print(tunnel.before)
print(tunnel.after)
raise RuntimeError("tunnel '%s' failed to start"%(cmd))
else:
return tunnel.pid
else:
if failed:
print("Password rejected, try again")
password=None
if password is None:
password = getpass("%s's password: "%(server))
tunnel.sendline(password)
failed = True
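# Illustrative note (not part of the original module): for lport=10022, rport=5555,
# server='user@gateway.example.com', remoteip='127.0.0.1' and timeout=60, the command
# composed above is roughly
#   ssh -f -p 22 -L 127.0.0.1:10022:127.0.0.1:5555 user@gateway.example.com sleep 60
# so the backgrounded ssh exits once the sleep ends and no forwarded connection is open.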
def _split_server(server):
if '@' in server:
username,server = server.split('@', 1)
else:
username = getuser()
if ':' in server:
server, port = server.split(':')
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
try:
from multiprocessing import Process
except ImportError:
raise ImportError("multiprocessing module required for backgrounding Paramiko tunnnels")
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: "%(server))
p = Process(target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password))
p.daemon=False
p.start()
atexit.register(_shutdown_process, p)
return p
def _shutdown_process(p):
if p.is_alive():
p.terminate()
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
"""Function for actually starting a paramiko tunnel, to be passed
to multiprocessing.Process(target=this), and not called directly.
"""
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True, password=password)
# except paramiko.AuthenticationException:
# if password is None:
# password = getpass("%s@%s's password: "%(username, server))
# client.connect(server, port, username=username, password=password)
# else:
# raise
except Exception as e:
print('*** Failed to connect to %s:%d: %r' % (server, port, e))
sys.exit(1)
# print('Now forwarding port %d to %s:%d ...' % (lport, server, rport))
try:
forward_tunnel(lport, remoteip, rport, client.get_transport())
except KeyboardInterrupt:
print('SIGINT: Port forwarding stopped cleanly')
sys.exit(0)
except Exception as e:
print("Port forwarding stopped uncleanly: %s"%e)
sys.exit(255)
if sys.platform == 'win32':
ssh_tunnel = paramiko_tunnel
else:
ssh_tunnel = openssh_tunnel
__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
|
core.py
|
# -*- coding: utf-8 -*-
import logging
import time
from requests.exceptions import ConnectionError
from threading import Thread
import pymongo
import requests
import json
import sys
import socketIO_client
from . import masks
from . import customized_methods
from six import iteritems
from six import string_types as basestring
setattr(socketIO_client.transports.XHR_PollingTransport,
'recv_packet', customized_methods.custom_recv_packet)
class CryptocompareClient(object):
def __init__(self, sub_strings=None, websocket_url='https://streamer.cryptocompare.com',
mongo_col=None, namespace=None):
"""CryptocompareClient connects to the Websocket and Rest APIs of Cryptocompare.
Args:
sub_strings (optional): Websocket subscriptions, defaults to None.
The strings must have the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'
                sub_strings must either be a list of strings or a single string
websocket_url (optional): The url used to connect to the websocket.
Defaults to 'https://streamer.cryptocompare.com'
mongo_col (optional): MongoDB (pymongo) collection to insert messages into.
Defaults to None
namespace (optional): socketIO Namespace used to handle events.
Defaults to None.
"""
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
if isinstance(sub_strings, list):
self.sub_strings = sub_strings[:]
else:
self.sub_strings = sub_strings
self.url = websocket_url
self.mongo_col = mongo_col
self.namespace = namespace
self.restart_after = None
self._init_websocket()
def _init_websocket(self):
if self.namespace is None:
self.socket = socketIO_client.SocketIO(self.url)
else:
self.socket = socketIO_client.SocketIO(self.url, Namespace=self.namespace)
self.socket.on('m', self._on_message)
if self.sub_strings is not None:
self.subscribe(sub_strings=self.sub_strings[:])
def restart(self):
"""Restart websocket"""
logging.info("Restarting Cryptocompare Client...")
self.stop()
if hasattr(self, "thread"):
self.thread.join()
self._init_websocket()
self.listen(self.seconds, self.restart_after)
def listen(self, seconds=None, restart_after=None):
"""Start listening to the websocket.
Args:
seconds: Number of seconds to listen. Defaults to None.
If not specified, client will listen forever.
restart_after: Number of seconds to wait until restart,
when no messages are received. If not specified,
client will not restart.
"""
self.seconds = seconds
self.restart_after = restart_after
self.start_time = time.time()
self.received_messages = []
if restart_after is None:
if self.seconds is not None:
self.socket.wait(seconds=seconds)
else:
self.socket.wait()
else:
def _wait_thread():
if self.seconds is not None:
self.socket.wait(seconds=seconds)
else:
self.socket.wait()
self.thread = Thread(target=_wait_thread)
self.thread.start()
try:
if restart_after is not None:
time.sleep(restart_after)
while True:
                    n_messages = len([message_time for message_time in self.received_messages
                                      if time.time() - message_time < restart_after])
logging.debug("Number of messages in last %s seconds: %s",
restart_after, n_messages)
if restart_after is not None:
if n_messages == 0:
self.restart()
break
time.sleep(1)
except KeyboardInterrupt:
logging.debug("KeyboardInterrupt: Stopping...")
self.stop()
self.thread.join()
def stop(self):
"""Disconnect websocket"""
self.socket.disconnect()
def get_coin_list(self, base_url='https://www.cryptocompare.com/api/data/'):
"""Return coin list, see https://www.cryptocompare.com/api/#-api-data-coinlist-"""
r = requests.get('{}coinlist/'.format(base_url))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_coin_snapshot(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):
"""Return coin snapshot, see https://www.cryptocompare.com/api/#-api-data-coinsnapshot-"""
r = requests.get('{}coinsnapshot/?fsym={}&tsym={}'.format(base_url,fsym,tsym))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_top_pairs(self, fsym, limit=2000, base_url='https://min-api.cryptocompare.com/data/'):
"""Return top currency pairs by volume, see https://www.cryptocompare.com/api/#-api-data-toppairs-"""
r = requests.get('{}top/pairs?fsym={}&limit={}'.format(base_url, fsym, limit))
if r.status_code == 200:
return r.json()
else:
return r.status_code
def get_all_coins(self, base_url='https://www.cryptocompare.com/api/data/'):
"""Return a list of all coins that are available on CryptoCompare"""
coin_list = self.get_coin_list(base_url=base_url)
return [coin for coin,d in iteritems(coin_list['Data'])]
def get_all_exchanges(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):
"""Return a list of all exchanges that trade a currency pair"""
res = self.get_coin_snapshot(fsym, tsym, base_url=base_url)
try:
exchanges = res['Data']['Exchanges']
markets = [x['MARKET'] for x in exchanges]
return sorted(markets)
except KeyError:
return res
def query_rest_api(self, api_name, base_url='https://min-api.cryptocompare.com/data/', **params):
"""Query the Rest API with specified params"""
query_params = '&'.join(['{}={}'.format(k,v) for k,v in iteritems(params)])
query_string = base_url + api_name + '?' + query_params
r = requests.get(query_string)
if r.status_code == 200:
return r.json()
else:
return r.status_code
def subscribe(self, method=None, exchange=None, currency_pair=None, sub_strings=None):
"""Subscribe to websocket channels
The channels must either be specified by the parameter sub_strings or by a combination
of the parameters method, exchange and currency_pair.
Args:
method (optional): The method must either be 'TRADE', 'CURRENT', 'CURRENTAGG' or
                one of the corresponding SubscriptionIDs (0, 2 or 5).
See https://www.cryptocompare.com/api/#-api-web-socket-subscribe- for more
information.
exchange (optional): A valid exchange name that is recognized by the cryptocompare API.
currency_pair (optional): A tuple of currency symbols that are recognized by the
cryptocompare API, such as ('BTC','USD')
sub_strings (optional): Subscription strings in the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'.
                sub_strings must either be a list of strings or a single string.
"""
if method is None and exchange is None and currency_pair is None and sub_strings is None:
raise ValueError("Either sub_strings or method, exchange, and currency_pair must be specified.")
elif sub_strings is not None:
if method is not None or exchange is not None or currency_pair is not None:
raise ValueError("If sub_strings is specified, all other keyword arguments must be None.")
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
elif method is None or exchange is None or currency_pair is None:
raise ValueError("If sub_strings is None, all other keyword arguments must be specified.")
else:
method = self._convert_method_to_number(method)
sub_strings = ['{}~{}~{}~{}'.format(method,
exchange,
currency_pair[0],
currency_pair[1])]
if self.sub_strings is None:
self.sub_strings = []
self.sub_strings.extend(sub_strings)
self.sub_strings = list(set(self.sub_strings))
try:
self.socket.emit('SubAdd', { 'subs': sub_strings })
except ConnectionError as e:
logging.info("ConnectionError: %s", e)
self.restart()
def unsubscribe(self, method=None, exchange=None, currency_pair=None, sub_strings=None):
"""Unubscribe from websocket channels
The channels must either be specified by the parameter sub_strings or by a combination
of the parameters method, exchange and currency_pair.
Args:
method (optional): The method must either be 'TRADE', 'CURRENT', 'CURRENTAGG' or
                one of the corresponding SubscriptionIDs (0, 2 or 5).
See https://www.cryptocompare.com/api/#-api-web-socket-subscribe- for more
information.
exchange (optional): A valid exchange name that is recognized by the cryptocompare API.
currency_pair (optional): A tuple of currency symbols that are recognized by the
cryptocompare API, such as ('BTC','USD')
sub_strings (optional): Subscription strings in the format
'{SubscriptionId}~{ExchangeName}~{FromSymbol}~{ToSymbol}'.
                sub_strings must either be a list of strings or a single string.
"""
if sub_strings is not None:
if isinstance(sub_strings, basestring):
sub_strings = [sub_strings]
self.socket.emit('SubRemove', { 'subs': sub_strings })
else:
method = self._convert_method_to_number(method)
sub_strings = ['{}~{}~{}~{}'.format(method,
exchange,
currency_pair[0],
currency_pair[1])]
self.socket.emit('SubRemove', { 'subs': sub_strings })
def unsubscribe_all(self):
"""Unsubscribe from all channels that have been subscribed"""
self.socket.emit('SubRemove', { 'subs': self.sub_strings })
def _convert_method_to_number(self, method):
"""Convert method name to corresponding SubscriptionId"""
if str(method).upper() not in ['0', '2', '5', 'TRADE', 'CURRENT', 'CURRENTAGG']:
raise ValueError('Method has invalid value: {}'.format(method))
        if str(method).upper() == 'TRADE':
method = '0'
elif str(method).upper() == 'CURRENT':
method = '2'
elif str(method).upper() == 'CURRENTAGG':
method = '5'
return method
def _parse_message(self, response):
"""Parse a message received through websocket and return dictionary
Args:
response (str): The raw message
"""
response_list = response.split('~')
sub_id = response_list[0]
try:
if sub_id == '0': # TRADE
                keys = ['SubscriptionId', 'ExchangeName', 'FromSymbol', 'ToSymbol', 'Flag',
                        'TradeId', 'TimeStamp', 'Quantity', 'Price', 'Total']
res = dict(zip(keys, response_list))
elif sub_id == '2' or sub_id == '5': # CURRENT / CURRENTAGG
unpacked = {}
mask = int(response_list[-1], 16)
i = 0
for key,value in masks.current:
if value == 0 or mask & value:
unpacked[key] = response_list[i]
i += 1
res = unpacked
else:
logging.debug("Unknown sub_id in message: %s", response)
res = None
        except Exception:
logging.warning("Parsing failed for: %s", response)
res = None
return res
def _on_message(self, *args):
"""Handle received messages and write to MongoDB if mongo_col was specified"""
parsed_message = self._parse_message(args[0])
if parsed_message is None:
logging.debug(("Could not parse message: %s", args[0]))
return
logging.debug("Received message: %s", parsed_message)
parsed_message = self.process_message(parsed_message)
if self.mongo_col is not None:
self.mongo_col.insert_one(parsed_message)
def process_message(self, msg):
"""Override this method to alter or handle incoming messages"""
if self.mongo_col is None:
print(msg)
return msg
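# ---------------------------------------------------------------------------
# Minimal usage sketch, kept entirely in comments so importing this module has
# no side effects. `CryptocompareClient` stands in for whatever the class above
# is actually named; the streamer URL and exchange names are assumptions.
#
# class PrintingClient(CryptocompareClient):
#     def process_message(self, msg):
#         # Override point documented above: handle each parsed message.
#         print(msg)
#         return msg
#
# client = PrintingClient(sub_strings='0~Coinbase~BTC~USD',
#                         websocket_url='https://streamer.cryptocompare.com/')
# client.subscribe(method='CURRENTAGG', exchange='CCCAGG', currency_pair=('BTC', 'USD'))
# client.listen(seconds=60, restart_after=30)   # restart if silent for 30 s
# client.stop()
# ---------------------------------------------------------------------------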
|
cloud_verifier_tornado.py
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import signal
import traceback
import sys
import functools
import asyncio
import os
from multiprocessing import Process
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
import tornado.ioloop
import tornado.web
from keylime import config
from keylime import json
from keylime import registrar_client
from keylime.agentstates import AgentAttestStates
from keylime.common import states, validators, retry
from keylime.db.verifier_db import VerfierMain
from keylime.db.verifier_db import VerifierAllowlist
from keylime.db.keylime_db import DBEngineManager, SessionManager
from keylime import keylime_logging
from keylime import cloud_verifier_common
from keylime import revocation_notifier
from keylime import web_util
from keylime import tornado_requests
from keylime import api_version as keylime_api_version
from keylime.failure import MAX_SEVERITY_LABEL, Failure, Component
logger = keylime_logging.init_logging('cloudverifier')
try:
engine = DBEngineManager().make_engine('cloud_verifier')
except SQLAlchemyError as err:
logger.error('Error creating SQL engine or session: %s', err)
sys.exit(1)
def get_session():
return SessionManager().make_session(engine)
def get_AgentAttestStates():
return AgentAttestStates.get_instance()
# The "exclude_db" dict values are removed from the response before adding the dict to the DB
# This is because we want these values to remain ephemeral and not stored in the database.
exclude_db = {
'registrar_data': '',
'nonce': '',
'b64_encrypted_V': '',
'provide_V': True,
'num_retries': 0,
'pending_event': None,
'first_verified': False,
# the following 3 items are updated to VerifierDB only when the AgentState is stored
'boottime': '',
'ima_pcrs': [],
'pcr10': '',
'next_ima_ml_entry': 0,
'learned_ima_keyrings': {},
'ssl_context': None,
}
def _from_db_obj(agent_db_obj):
    fields = ['agent_id', 'v', 'ip', 'port', 'operational_state', 'public_key',
              'tpm_policy', 'vtpm_policy', 'meta_data', 'mb_refstate', 'allowlist',
              'ima_sign_verification_keys', 'revocation_key', 'accept_tpm_hash_algs',
              'accept_tpm_encryption_algs', 'accept_tpm_signing_algs', 'hash_alg',
              'enc_alg', 'sign_alg', 'boottime', 'ima_pcrs', 'pcr10',
              'next_ima_ml_entry', 'learned_ima_keyrings', 'supported_version',
              'mtls_cert', 'ak_tpm']
agent_dict = {}
for field in fields:
agent_dict[field] = getattr(agent_db_obj, field, None)
# add default fields that are ephemeral
for key,val in exclude_db.items():
agent_dict[key] = val
return agent_dict
def verifier_db_delete_agent(session, agent_id):
get_AgentAttestStates().delete_by_agent_id(agent_id)
session.query(VerfierMain).filter_by(
agent_id=agent_id).delete()
session.commit()
def store_attestation_state(agentAttestState):
# Only store if IMA log was evaluated
if agentAttestState.get_ima_pcrs():
session = get_session()
try:
update_agent = session.query(VerfierMain).get(agentAttestState.get_agent_id())
update_agent.boottime = agentAttestState.get_boottime()
update_agent.next_ima_ml_entry = agentAttestState.get_next_ima_ml_entry()
ima_pcrs_dict = agentAttestState.get_ima_pcrs()
update_agent.ima_pcrs = list(ima_pcrs_dict.keys())
for pcr_num, value in ima_pcrs_dict.items():
setattr(update_agent, 'pcr%d' % pcr_num, value)
update_agent.learned_ima_keyrings = agentAttestState.get_ima_keyrings().to_json()
try:
session.add(update_agent)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error on storing attestation state: %s', e)
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error on storing attestation state: %s', e)
class BaseHandler(tornado.web.RequestHandler):
def prepare(self): # pylint: disable=W0235
super().prepare()
def write_error(self, status_code, **kwargs):
self.set_header('Content-Type', 'text/json')
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
lines = []
for line in traceback.format_exception(*kwargs["exc_info"]):
lines.append(line)
self.finish(json.dumps({
'code': status_code,
'status': self._reason,
'traceback': lines,
'results': {},
}))
else:
self.finish(json.dumps({
'code': status_code,
'status': self._reason,
'results': {},
}))
def data_received(self, chunk):
raise NotImplementedError()
class MainHandler(tornado.web.RequestHandler):
def head(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def get(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def delete(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def post(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def put(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def data_received(self, chunk):
raise NotImplementedError()
class VersionHandler(BaseHandler):
def head(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def get(self):
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(self, 405, "Not Implemented")
return
if "version" not in rest_params:
web_util.echo_json_response(self, 400, "URI not supported")
logger.warning('GET returning 400 response. URI not supported: %s', self.request.path)
return
version_info = {
"current_version": keylime_api_version.current_version(),
"supported_versions": keylime_api_version.all_versions(),
}
web_util.echo_json_response(self, 200, "Success", version_info)
def delete(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def post(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def put(self):
web_util.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def data_received(self, chunk):
raise NotImplementedError()
class AgentsHandler(BaseHandler):
mtls_options = None # Stores the cert, key and password used by the verifier for mTLS connections
def initialize(self, mtls_options):
self.mtls_options = mtls_options
def head(self):
"""HEAD not supported"""
web_util.echo_json_response(self, 405, "HEAD not supported")
def get(self):
"""This method handles the GET requests to retrieve status on agents from the Cloud Verifier.
Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
will return errors. Agents requests require a single agent_id parameter which identifies the
agent to be returned. If the agent_id is not found, a 404 response is returned. If the agent_id
was not found, it either completed successfully, or failed. If found, the agent_id is still polling
to contact the Cloud Agent.
"""
session = get_session()
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning('GET returning 400 response. uri not supported: %s', self.request.path)
return
agent_id = rest_params["agents"]
if (agent_id is not None) and (agent_id != ''):
# If the agent ID is not valid (wrong set of characters),
# just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("GET received an invalid agent ID: %s", agent_id)
return
try:
agent = session.query(VerfierMain).filter_by(
agent_id=agent_id).one_or_none()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
if agent is not None:
response = cloud_verifier_common.process_get_status(agent)
web_util.echo_json_response(self, 200, "Success", response)
else:
web_util.echo_json_response(self, 404, "agent id not found")
else:
json_response = None
if "bulk" in rest_params.keys():
agent_list = None
if ("verifier" in rest_params.keys()) and (rest_params["verifier"] != ''):
agent_list = session.query(VerfierMain).filter_by(verifier_id=rest_params["verifier"]).all()
else:
agent_list = session.query(VerfierMain).all()
json_response = {}
for agent in agent_list:
json_response[agent.agent_id] = cloud_verifier_common.process_get_status(agent)
web_util.echo_json_response(self, 200, "Success", json_response)
else:
if ("verifier" in rest_params.keys()) and (rest_params["verifier"] != ''):
json_response = session.query(VerfierMain.agent_id).filter_by(
verifier_id=rest_params["verifier"]).all()
else:
json_response = session.query(VerfierMain.agent_id).all()
web_util.echo_json_response(self, 200, "Success", {
'uuids': json_response})
logger.info('GET returning 200 response for agent_id list')
def delete(self):
"""This method handles the DELETE requests to remove agents from the Cloud Verifier.
Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
agents requests require a single agent_id parameter which identifies the agent to be deleted.
"""
session = get_session()
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
return
agent_id = rest_params["agents"]
if agent_id is None:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning('DELETE returning 400 response. uri not supported: %s', self.request.path)
return
# If the agent ID is not valid (wrong set of characters), just
# do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("DELETE received an invalid agent ID: %s", agent_id)
return
try:
agent = session.query(VerfierMain).filter_by(
agent_id=agent_id).first()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
if agent is None:
web_util.echo_json_response(self, 404, "agent id not found")
logger.info('DELETE returning 404 response. agent id: %s not found.', agent_id)
return
verifier_id = config.get('cloud_verifier', 'cloudverifier_id', fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID)
if verifier_id != agent.verifier_id:
web_util.echo_json_response(self, 404, "agent id associated to this verifier")
logger.info('DELETE returning 404 response. agent id: %s not associated to this verifer.', agent_id)
return
op_state = agent.operational_state
if op_state in (states.SAVED, states.FAILED, states.TERMINATED,
states.TENANT_FAILED, states.INVALID_QUOTE):
try:
verifier_db_delete_agent(session, agent_id)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
web_util.echo_json_response(self, 200, "Success")
logger.info('DELETE returning 200 response for agent id: %s', agent_id)
else:
try:
update_agent = session.query(VerfierMain).get(agent_id)
update_agent.operational_state = states.TERMINATED
try:
session.add(update_agent)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
session.commit()
web_util.echo_json_response(self, 202, "Accepted")
logger.info('DELETE returning 202 response for agent id: %s', agent_id)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
def post(self):
"""This method handles the POST requests to add agents to the Cloud Verifier.
Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's will return errors.
agents requests require a json block sent in the body
"""
session = get_session()
try:
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning('POST returning 400 response. uri not supported: %s', self.request.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
# If the agent ID is not valid (wrong set of
# characters), just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("POST received an invalid agent ID: %s", agent_id)
return
content_length = len(self.request.body)
if content_length == 0:
web_util.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('POST returning 400 response. Expected non zero content length.')
else:
json_body = json.loads(self.request.body)
agent_data = {}
agent_data['v'] = json_body['v']
agent_data['ip'] = json_body['cloudagent_ip']
agent_data['port'] = int(json_body['cloudagent_port'])
agent_data['operational_state'] = states.START
agent_data['public_key'] = ""
agent_data['tpm_policy'] = json_body['tpm_policy']
agent_data['vtpm_policy'] = json_body['vtpm_policy']
agent_data['meta_data'] = json_body['metadata']
agent_data['allowlist'] = json_body['allowlist']
agent_data['mb_refstate'] = json_body['mb_refstate']
agent_data['ima_sign_verification_keys'] = json_body['ima_sign_verification_keys']
agent_data['revocation_key'] = json_body['revocation_key']
agent_data['accept_tpm_hash_algs'] = json_body['accept_tpm_hash_algs']
agent_data['accept_tpm_encryption_algs'] = json_body['accept_tpm_encryption_algs']
agent_data['accept_tpm_signing_algs'] = json_body['accept_tpm_signing_algs']
agent_data['supported_version'] = json_body['supported_version']
agent_data['hash_alg'] = ""
agent_data['enc_alg'] = ""
agent_data['sign_alg'] = ""
agent_data['agent_id'] = agent_id
agent_data['boottime'] = 0
agent_data['ima_pcrs'] = []
agent_data['pcr10'] = None
agent_data['next_ima_ml_entry'] = 0
agent_data['learned_ima_keyrings'] = {}
agent_data['verifier_id'] = config.get('cloud_verifier', 'cloudverifier_id', fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID)
agent_data['verifier_ip'] = config.get('cloud_verifier', 'cloudverifier_ip')
agent_data['verifier_port'] = config.get('cloud_verifier', 'cloudverifier_port')
# We fetch the registrar data directly here because we require it for connecting to the agent
# using mTLS
registrar_client.init_client_tls('cloud_verifier')
registrar_data = registrar_client.getData(config.get("cloud_verifier", "registrar_ip"),
config.get("cloud_verifier", "registrar_port"), agent_id)
if registrar_data is None:
web_util.echo_json_response(self, 400,
f"Data for agent {agent_id} could not be found in registrar!")
logger.warning("Data for agent %s could not be found in registrar!", agent_id)
return
agent_data['mtls_cert'] = registrar_data.get('mtls_cert', None)
agent_data['ak_tpm'] = registrar_data['aik_tpm']
# TODO: Always error for v1.0 version after initial upgrade
if registrar_data.get('mtls_cert', None) is None and agent_data['supported_version'] != "1.0":
web_util.echo_json_response(self, 400, "mTLS certificate for agent is required!")
return
is_valid, err_msg = cloud_verifier_common.validate_agent_data(agent_data)
if not is_valid:
web_util.echo_json_response(self, 400, err_msg)
logger.warning(err_msg)
return
try:
new_agent_count = session.query(
VerfierMain).filter_by(agent_id=agent_id).count()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise e
# don't allow overwriting
if new_agent_count > 0:
web_util.echo_json_response(
self, 409, "Agent of uuid %s already exists" % (agent_id))
logger.warning("Agent of uuid %s already exists", agent_id)
else:
try:
# Add the agent and data
session.add(VerfierMain(**agent_data))
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise e
# add default fields that are ephemeral
for key,val in exclude_db.items():
agent_data[key] = val
# Prepare SSLContext for mTLS connections
agent_mtls_cert_enabled = config.getboolean('cloud_verifier', 'agent_mtls_cert_enabled', fallback=False)
mtls_cert = registrar_data.get('mtls_cert', None)
agent_data['ssl_context'] = None
if agent_mtls_cert_enabled and mtls_cert:
agent_data['ssl_context'] = web_util.generate_agent_mtls_context(mtls_cert, self.mtls_options)
if agent_data['ssl_context'] is None:
logger.warning('Connecting to agent without mTLS: %s', agent_id)
asyncio.ensure_future(
process_agent(agent_data, states.GET_QUOTE))
web_util.echo_json_response(self, 200, "Success")
logger.info('POST returning 200 response for adding agent id: %s', agent_id)
else:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("POST returning 400 response. uri not supported")
except Exception as e:
web_util.echo_json_response(self, 400, "Exception error: %s" % e)
logger.warning("POST returning 400 response. Exception error: %s", e)
logger.exception(e)
def put(self):
"""This method handles the PUT requests to add agents to the Cloud Verifier.
Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's will return errors.
agents requests require a json block sent in the body
"""
session = get_session()
try:
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning('PUT returning 400 response. uri not supported: %s', self.request.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("PUT returning 400 response. uri not supported")
# If the agent ID is not valid (wrong set of characters),
# just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("PUT received an invalid agent ID: %s", agent_id)
return
try:
verifier_id = config.get('cloud_verifier', 'cloudverifier_id', fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID)
agent = session.query(VerfierMain).filter_by(
agent_id=agent_id, verifier_id=verifier_id).one()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise e
if agent is None:
web_util.echo_json_response(self, 404, "agent id not found")
logger.info('PUT returning 404 response. agent id: %s not found.', agent_id)
return
if "reactivate" in rest_params:
if not isinstance(agent, dict):
agent = _from_db_obj(agent)
if agent["mtls_cert"]:
agent['ssl_context'] = web_util.generate_agent_mtls_context(agent["mtls_cert"], self.mtls_options)
agent["operational_state"] = states.START
asyncio.ensure_future(
process_agent(agent, states.GET_QUOTE))
web_util.echo_json_response(self, 200, "Success")
logger.info('PUT returning 200 response for agent id: %s', agent_id)
elif "stop" in rest_params:
# do stuff for terminate
logger.debug("Stopping polling on %s", agent_id)
try:
session.query(VerfierMain).filter(VerfierMain.agent_id == agent_id).update(
{'operational_state': states.TENANT_FAILED})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
web_util.echo_json_response(self, 200, "Success")
logger.info('PUT returning 200 response for agent id: %s', agent_id)
else:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("PUT returning 400 response. uri not supported")
except Exception as e:
web_util.echo_json_response(self, 400, "Exception error: %s" % e)
logger.warning("PUT returning 400 response. Exception error: %s", e)
logger.exception(e)
def data_received(self, chunk):
raise NotImplementedError()
class AllowlistHandler(BaseHandler):
def head(self):
web_util.echo_json_response(
self, 400, "Allowlist handler: HEAD Not Implemented")
def get(self):
"""Get an allowlist
GET /allowlists/{name}
"""
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None or 'allowlists' not in rest_params:
web_util.echo_json_response(self, 400, "Invalid URL")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
allowlist_name = rest_params['allowlists']
if allowlist_name is None:
web_util.echo_json_response(self, 400, "Invalid URL")
logger.warning(
'GET returning 400 response: %s', self.request.path)
return
session = get_session()
try:
allowlist = session.query(VerifierAllowlist).filter_by(
name=allowlist_name).one()
except NoResultFound:
web_util.echo_json_response(self, 404, "Allowlist %s not found" % allowlist_name)
return
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
web_util.echo_json_response(self, 500, "Failed to get allowlist")
raise
response = {}
for field in ('name', 'tpm_policy', 'vtpm_policy', 'ima_policy'):
response[field] = getattr(allowlist, field, None)
web_util.echo_json_response(self, 200, 'Success', response)
def delete(self):
"""Delete an allowlist
DELETE /allowlists/{name}
"""
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None or 'allowlists' not in rest_params:
web_util.echo_json_response(self, 400, "Invalid URL")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
allowlist_name = rest_params['allowlists']
if allowlist_name is None:
web_util.echo_json_response(self, 400, "Invalid URL")
logger.warning(
'DELETE returning 400 response: %s', self.request.path)
return
session = get_session()
try:
session.query(VerifierAllowlist).filter_by(
name=allowlist_name).one()
except NoResultFound:
web_util.echo_json_response(self, 404, "Allowlist %s not found" % allowlist_name)
return
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
web_util.echo_json_response(self, 500, "Failed to get allowlist")
raise
try:
session.query(VerifierAllowlist).filter_by(
name=allowlist_name).delete()
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
web_util.echo_json_response(self, 500, "Failed to get allowlist")
raise
# NOTE(kaifeng) 204 Can not have response body, but current helper
# doesn't support this case.
self.set_status(204)
self.set_header('Content-Type', 'application/json')
self.finish()
logger.info(
'DELETE returning 204 response for allowlist: %s', allowlist_name)
def post(self):
"""Create an allowlist
POST /allowlists/{name}
body: {"tpm_policy": {..}, "vtpm_policy": {..}
"""
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None or 'allowlists' not in rest_params:
web_util.echo_json_response(self, 400, "Invalid URL")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
allowlist_name = rest_params['allowlists']
if allowlist_name is None:
web_util.echo_json_response(self, 400, "Invalid URL")
return
content_length = len(self.request.body)
if content_length == 0:
web_util.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning(
'POST returning 400 response. Expected non zero content length.')
return
allowlist = {}
json_body = json.loads(self.request.body)
allowlist['name'] = allowlist_name
tpm_policy = json_body.get('tpm_policy')
if tpm_policy:
allowlist['tpm_policy'] = tpm_policy
vtpm_policy = json_body.get('vtpm_policy')
if vtpm_policy:
allowlist['vtpm_policy'] = vtpm_policy
ima_policy = json_body.get('ima_policy')
if ima_policy:
allowlist['ima_policy'] = ima_policy
session = get_session()
        # don't allow overwriting
try:
al_count = session.query(
VerifierAllowlist).filter_by(name=allowlist_name).count()
if al_count > 0:
web_util.echo_json_response(
self, 409, "Allowlist with name %s already exists" % allowlist_name)
logger.warning(
"Allowlist with name %s already exists", allowlist_name)
return
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
try:
# Add the agent and data
session.add(VerifierAllowlist(**allowlist))
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
web_util.echo_json_response(self, 201)
logger.info('POST returning 201')
def put(self):
web_util.echo_json_response(
self, 400, "Allowlist handler: PUT Not Implemented")
def data_received(self, chunk):
raise NotImplementedError()
async def invoke_get_quote(agent, need_pubkey):
failure = Failure(Component.INTERNAL, ["verifier"])
if agent is None:
raise Exception("agent deleted while being processed")
params = cloud_verifier_common.prepare_get_quote(agent)
partial_req = "1"
if need_pubkey:
partial_req = "0"
# TODO: remove special handling after initial upgrade
if agent['ssl_context']:
res = tornado_requests.request("GET",
"https://%s:%d/v%s/quotes/integrity?nonce=%s&mask=%s&vmask=%s&partial=%s&ima_ml_entry=%d" %
(agent['ip'], agent['port'], agent['supported_version'], params["nonce"], params["mask"], params['vmask'], partial_req, params['ima_ml_entry']),
context=agent['ssl_context'])
else:
res = tornado_requests.request("GET",
"http://%s:%d/v%s/quotes/integrity?nonce=%s&mask=%s&vmask=%s&partial=%s&ima_ml_entry=%d" %
(agent['ip'], agent['port'], agent['supported_version'], params["nonce"], params["mask"],
params['vmask'], partial_req, params['ima_ml_entry']))
response = await res
if response.status_code != 200:
# this is a connection error, retry get quote
if response.status_code in [500, 599]:
asyncio.ensure_future(process_agent(
agent, states.GET_QUOTE_RETRY))
else:
# catastrophic error, do not continue
logger.critical("Unexpected Get Quote response error for cloud agent %s, Error: %s", agent['agent_id'], response.status_code)
failure.add_event("no_quote", "Unexpected Get Quote reponse from agent", False)
asyncio.ensure_future(process_agent(agent, states.FAILED, failure))
else:
try:
json_response = json.loads(response.body)
# validate the cloud agent response
if 'provide_V' not in agent :
agent['provide_V'] = True
agentAttestState = get_AgentAttestStates().get_by_agent_id(agent['agent_id'])
failure = cloud_verifier_common.process_quote_response(agent, json_response['results'], agentAttestState)
if not failure:
if agent['provide_V']:
asyncio.ensure_future(process_agent(agent, states.PROVIDE_V))
else:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
else:
asyncio.ensure_future(process_agent(agent, states.INVALID_QUOTE, failure))
# store the attestation state
store_attestation_state(agentAttestState)
except Exception as e:
logger.exception(e)
async def invoke_provide_v(agent):
failure = Failure(Component.INTERNAL, ["verifier"])
if agent is None:
raise Exception("Agent deleted while being processed")
try:
if agent['pending_event'] is not None:
agent['pending_event'] = None
except KeyError:
pass
v_json_message = cloud_verifier_common.prepare_v(agent)
# TODO: remove special handling after initial upgrade
if agent['ssl_context']:
res = tornado_requests.request(
"POST", "https://%s:%d/v%s/keys/vkey" % (agent['ip'], agent['port'], agent['supported_version']),
data=v_json_message, context=agent['ssl_context'])
else:
res = tornado_requests.request(
"POST", "http://%s:%d/v%s/keys/vkey" % (agent['ip'], agent['port'], agent['supported_version']),
data=v_json_message)
response = await res
if response.status_code != 200:
if response.status_code in [500, 599]:
asyncio.ensure_future(
process_agent(agent, states.PROVIDE_V_RETRY))
else:
# catastrophic error, do not continue
logger.critical("Unexpected Provide V response error for cloud agent %s, Error: %s", agent['agent_id'], response.status_code)
failure.add_event("no_v", {"message": "Unexpected provide V response", "data": response.status_code}, False)
asyncio.ensure_future(process_agent(agent, states.FAILED, failure))
else:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
async def process_agent(agent, new_operational_state, failure=Failure(Component.INTERNAL, ["verifier"])):
# Convert to dict if the agent arg is a db object
if not isinstance(agent, dict):
agent = _from_db_obj(agent)
session = get_session()
try: # pylint: disable=R1702
main_agent_operational_state = agent['operational_state']
try:
stored_agent = session.query(VerfierMain).filter_by(
agent_id=str(agent['agent_id'])).first()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
        # if the user terminated this agent
if stored_agent.operational_state == states.TERMINATED:
logger.warning("Agent %s terminated by user.", agent['agent_id'])
if agent['pending_event'] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(
agent['pending_event'])
verifier_db_delete_agent(session, agent['agent_id'])
return
# if the user tells us to stop polling because the tenant quote check failed
if stored_agent.operational_state == states.TENANT_FAILED:
logger.warning("Agent %s has failed tenant quote. Stopping polling", agent['agent_id'])
if agent['pending_event'] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(
agent['pending_event'])
return
# If failed during processing, log regardless and drop it on the floor
# The administration application (tenant) can GET the status and act accordingly (delete/retry/etc).
if new_operational_state in (states.FAILED, states.INVALID_QUOTE):
assert failure, "States FAILED and INVALID QUOTE should only be reached with a failure message"
if agent.get('severity_level') is None or agent['severity_level'] < failure.highest_severity.severity:
agent['severity_level'] = failure.highest_severity.severity
agent['last_event_id'] = failure.highest_severity_event.event_id
agent['operational_state'] = new_operational_state
# issue notification for invalid quotes
if new_operational_state == states.INVALID_QUOTE:
cloud_verifier_common.notify_error(agent, event=failure.highest_severity_event)
# When the failure is irrecoverable we stop polling the agent
if not failure.recoverable or failure.highest_severity == MAX_SEVERITY_LABEL:
if agent['pending_event'] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(
agent['pending_event'])
for key in exclude_db:
if key in agent:
del agent[key]
session.query(VerfierMain).filter_by(
agent_id=agent['agent_id']).update(agent)
session.commit()
        # propagate all state, but remove non-DB keys first (using exclude_db)
try:
agent_db = dict(agent)
for key in exclude_db:
if key in agent_db:
del agent_db[key]
session.query(VerfierMain).filter_by(
agent_id=agent_db['agent_id']).update(agent_db)
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
# If agent was in a failed state we check if we either stop polling
# or just add it again to the event loop
if new_operational_state in [states.FAILED, states.INVALID_QUOTE]:
if not failure.recoverable or failure.highest_severity == MAX_SEVERITY_LABEL:
logger.warning("Agent %s failed, stopping polling", agent['agent_id'])
return
await invoke_get_quote(agent, False)
return
# if new, get a quote
if (main_agent_operational_state == states.START and
new_operational_state == states.GET_QUOTE):
agent['num_retries'] = 0
agent['operational_state'] = states.GET_QUOTE
await invoke_get_quote(agent, True)
return
if (main_agent_operational_state == states.GET_QUOTE and
new_operational_state == states.PROVIDE_V):
agent['num_retries'] = 0
agent['operational_state'] = states.PROVIDE_V
await invoke_provide_v(agent)
return
if (main_agent_operational_state in (states.PROVIDE_V, states.GET_QUOTE) and
new_operational_state == states.GET_QUOTE):
agent['num_retries'] = 0
interval = config.getfloat('cloud_verifier', 'quote_interval')
agent['operational_state'] = states.GET_QUOTE
if interval == 0:
await invoke_get_quote(agent, False)
else:
logger.debug("Setting up callback to check again in %f seconds", interval)
# set up a call back to check again
cb = functools.partial(invoke_get_quote, agent, False)
pending = tornado.ioloop.IOLoop.current().call_later(interval, cb)
agent['pending_event'] = pending
return
maxr = config.getint('cloud_verifier', 'max_retries')
interval = config.getfloat('cloud_verifier', 'retry_interval')
exponential_backoff = config.getboolean('cloud_verifier', 'exponential_backoff')
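        # Retry pacing: retry.retry_time derives the next delay from retry_interval
        # and the current retry count; with exponential_backoff enabled the delay
        # grows on every attempt instead of staying fixed.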
if (main_agent_operational_state == states.GET_QUOTE and
new_operational_state == states.GET_QUOTE_RETRY):
if agent['num_retries'] >= maxr:
logger.warning("Agent %s was not reachable for quote in %d tries, setting state to FAILED", agent['agent_id'], maxr)
failure.add_event("not_reachable", "agent was not reachable from verifier", False)
if agent['first_verified']: # only notify on previously good agents
cloud_verifier_common.notify_error(
agent, msgtype='comm_error', event=failure.highest_severity_event)
else:
logger.debug("Communication error for new agent. No notification will be sent")
await process_agent(agent, states.FAILED, failure)
else:
agent['operational_state'] = states.GET_QUOTE
cb = functools.partial(invoke_get_quote, agent, True)
agent['num_retries'] += 1
next_retry = retry.retry_time(exponential_backoff, interval, agent['num_retries'], logger)
logger.info("Connection to %s refused after %d/%d tries, trying again in %f seconds", agent['ip'], agent['num_retries'], maxr, next_retry)
tornado.ioloop.IOLoop.current().call_later(next_retry, cb)
return
if (main_agent_operational_state == states.PROVIDE_V and
new_operational_state == states.PROVIDE_V_RETRY):
if agent['num_retries'] >= maxr:
logger.warning("Agent %s was not reachable to provide v in %d tries, setting state to FAILED", agent['agent_id'], maxr)
failure.add_event("not_reachable_v", "agent was not reachable to provide V", False)
cloud_verifier_common.notify_error(
agent, msgtype='comm_error', event=failure.highest_severity_event)
await process_agent(agent, states.FAILED, failure)
else:
agent['operational_state'] = states.PROVIDE_V
cb = functools.partial(invoke_provide_v, agent)
agent['num_retries'] += 1
next_retry = retry.retry_time(exponential_backoff, interval, agent['num_retries'], logger)
logger.info("Connection to %s refused after %d/%d tries, trying again in %f seconds", agent['ip'], agent['num_retries'], maxr, next_retry)
tornado.ioloop.IOLoop.current().call_later(next_retry, cb)
return
raise Exception("nothing should ever fall out of this!")
except Exception as e:
logger.error("Polling thread error: %s", e)
logger.exception(e)
async def activate_agents(verifier_id, verifier_ip, verifier_port, mtls_options):
session = get_session()
aas = get_AgentAttestStates()
try:
agents = session.query(VerfierMain).filter_by(
verifier_id=verifier_id).all()
for agent in agents:
agent.verifier_ip = verifier_ip
            agent.verifier_port = verifier_port
agent_run = _from_db_obj(agent)
if agent_run["mtls_cert"]:
agent_run["ssl_context"] = web_util.generate_agent_mtls_context(agent_run["mtls_cert"], mtls_options)
if agent.operational_state == states.START:
asyncio.ensure_future(process_agent(agent_run, states.GET_QUOTE))
if agent.boottime:
ima_pcrs_dict = {}
for pcr_num in agent.ima_pcrs:
ima_pcrs_dict[pcr_num] = getattr(agent, 'pcr%d' % pcr_num)
aas.add(agent.agent_id, agent.boottime, ima_pcrs_dict, agent.next_ima_ml_entry, agent.learned_ima_keyrings)
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
def main():
"""Main method of the Cloud Verifier Server. This method is encapsulated in a function for packaging to allow it to be
called as a function by an external program."""
cloudverifier_port = config.get('cloud_verifier', 'cloudverifier_port')
cloudverifier_host = config.get('cloud_verifier', 'cloudverifier_ip')
cloudverifier_id = config.get('cloud_verifier', 'cloudverifier_id', fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID)
# allow tornado's max upload size to be configurable
max_upload_size = None
if config.has_option('cloud_verifier', 'max_upload_size'):
max_upload_size = int(config.get('cloud_verifier', 'max_upload_size'))
# set a conservative general umask
os.umask(0o077)
VerfierMain.metadata.create_all(engine, checkfirst=True)
session = get_session()
try:
query_all = session.query(VerfierMain).all()
for row in query_all:
if row.operational_state in states.APPROVED_REACTIVATE_STATES:
row.operational_state = states.START
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
num = session.query(VerfierMain.agent_id).count()
if num > 0:
agent_ids = session.query(VerfierMain.agent_id).all()
logger.info("Agent ids in db loaded from file: %s", agent_ids)
logger.info('Starting Cloud Verifier (tornado) on port %s, use <Ctrl-C> to stop', cloudverifier_port)
# print out API versions we support
keylime_api_version.log_api_versions(logger)
context, mtls_options = web_util.init_mtls(logger=logger)
# Check for user defined CA to connect to agent
agent_mtls_cert = config.get("cloud_verifier", "agent_mtls_cert", fallback=None)
agent_mtls_private_key = config.get("cloud_verifier", "agent_mtls_private_key", fallback=None)
agent_mtls_private_key_pw = config.get("cloud_verifier", "agent_mtls_private_key_pw", fallback=None)
# Only set custom options if the cert should not be the same as used by the verifier
if agent_mtls_cert != "CV":
mtls_options = (agent_mtls_cert, agent_mtls_private_key, agent_mtls_private_key_pw)
app = tornado.web.Application([
(r"/v?[0-9]+(?:\.[0-9]+)?/agents/.*", AgentsHandler, {"mtls_options": mtls_options}),
(r"/v?[0-9]+(?:\.[0-9]+)?/allowlists/.*", AllowlistHandler),
(r"/versions?", VersionHandler),
(r".*", MainHandler),
])
sockets = tornado.netutil.bind_sockets(
int(cloudverifier_port), address=cloudverifier_host)
def server_process(task_id):
logger.info("Starting server of process %s", task_id)
engine.dispose()
server = tornado.httpserver.HTTPServer(app, ssl_options=context, max_buffer_size=max_upload_size)
server.add_sockets(sockets)
def server_sig_handler(*_):
logger.info("Shutting down server %s..", task_id)
# Stop server to not accept new incoming connections
server.stop()
# Wait for all connections to be closed and then stop ioloop
async def stop():
await server.close_all_connections()
tornado.ioloop.IOLoop.current().stop()
asyncio.ensure_future(stop())
# Attach signal handler to ioloop.
# Do not use signal.signal(..) for that because it does not work!
loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGINT, server_sig_handler)
loop.add_signal_handler(signal.SIGTERM, server_sig_handler)
server.start()
if task_id == 0:
# Reactivate agents
asyncio.ensure_future(activate_agents(cloudverifier_id, cloudverifier_host, cloudverifier_port, mtls_options))
tornado.ioloop.IOLoop.current().start()
logger.debug("Server %s stopped.", task_id)
sys.exit(0)
processes = []
def sig_handler(*_):
if config.getboolean('cloud_verifier', 'revocation_notifier'):
revocation_notifier.stop_broker()
for p in processes:
p.join()
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
if config.getboolean('cloud_verifier', 'revocation_notifier'):
logger.info("Starting service for revocation notifications on port %s",
config.getint('cloud_verifier', 'revocation_notifier_port'))
revocation_notifier.start_broker()
num_workers = config.getint(
'cloud_verifier', 'multiprocessing_pool_num_workers')
if num_workers <= 0:
num_workers = tornado.process.cpu_count()
for task_id in range(0, num_workers):
process = Process(target=server_process, args=(task_id,))
process.start()
processes.append(process)
|
__init__.py
|
import itertools
import platform
import threading
import time
from xml.etree import ElementTree
import serial
from emu_power import response_entities
if platform.system() == 'Darwin':
_DEFAULT_DEVICE = '/dev/tty.usbmodem11'
elif platform.system() == 'Linux':
_DEFAULT_DEVICE = '/dev/ttyACM0'
else:
_DEFAULT_DEVICE = None
class Emu:
    # Construct a new Emu object. Set synchronous to True to attempt to return
    # results synchronously when possible. Timeout is the time period in seconds
    # until a request is considered failed. Poll factor is the number of polls
    # per second while waiting for a response. Set fresh_only to True to only
    # return fresh responses from get_data (only useful in asynchronous mode).
def __init__(self, debug=False, fresh_only=False, synchronous=False, timeout=10, poll_factor=2):
# Internal communication
self._channel_open = False
self._serial_port = None
self._thread_handle = None
self._stop_thread = False
self.debug = debug
self.fresh_only = fresh_only
self.synchronous = synchronous
self.timeout = timeout
self.poll_factor = poll_factor
# Data, updated asynchronously by thread, keyed
# by root element. These are defined by classes
# in response_entities.py
self._data = {}
# TODO: Implement history mechanism
# Get the most recent fresh response that has come in. This
# should be used in asynchronous mode.
def get_data(self, klass):
res = self._data.get(klass.tag_name())
if not self.fresh_only:
return res
if res is None or not res.fresh:
return None
res.fresh = False
return res
# Open communication channel
def start_serial(self, port_name=_DEFAULT_DEVICE):
assert port_name, (
"Must specify a port name; cannot determine default for your OS")
if self._channel_open:
return True
try:
self._serial_port = serial.Serial(port_name, 115200, timeout=1)
except serial.serialutil.SerialException:
return False
self._thread_handle = threading.Thread(target=self._communication_thread)
self._thread_handle.start()
self._channel_open = True
return True
# Close the communication channel
def stop_serial(self):
if not self._channel_open:
return True
self._stop_thread = True
self._thread_handle.join()
self._thread_handle = None
self._serial_port.close()
self._serial_port = None
return True
# Main communication thread - handles all asynchronous messaging
def _communication_thread(self):
while True:
if self._stop_thread:
self._stop_thread = False
return
# Update read data, ignoring timeouts
bin_lines = self._serial_port.readlines()
if len(bin_lines) > 0:
# A response can have multiple fragments, so we wrap them in a pseudo root for parsing
try:
wrapped = itertools.chain('<Root>', bin_lines, '</Root>')
root = ElementTree.fromstringlist(wrapped)
except ElementTree.ParseError:
if self.debug:
print("Malformed XML " + b''.join(bin_lines).decode('ASCII'))
continue
for tree in root:
if self.debug:
ElementTree.dump(tree)
response_type = tree.tag
klass = response_entities.Entity.tag_to_class(response_type)
if klass is None:
if self.debug:
print("Unsupported tag " + response_type)
continue
else:
self._data[response_type] = klass(tree)
# Issue a command to the device. Pass the command name as the first
# argument, and any additional params as a dict. Will return immediately
# unless the synchronous attribute on the library is true, in which case
# it will return data when available, or None if the timeout has elapsed.
def issue_command(self, command, params=None, return_class=None):
if not self._channel_open:
raise ValueError("Serial port is not open")
root = ElementTree.Element('Command')
name_field = ElementTree.SubElement(root, 'Name')
name_field.text = command
if params is not None:
for k, v in params.items():
if v is not None:
field = ElementTree.SubElement(root, k)
field.text = v
bin_string = ElementTree.tostring(root)
if self.debug:
ElementTree.dump(root)
if (not self.synchronous) or return_class is None:
if self.debug:
print("Object is in asynchronous mode or command does not have return type - not waiting for response")
self._serial_port.write(bin_string)
return True
# Do our best to return results synchronously
tag = return_class.tag_name()
# Invalidate current response
cur = self._data.get(tag)
if cur is not None:
cur.fresh = False
self._serial_port.write(bin_string)
step = 1.0 / self.poll_factor
for i in range(0, self.timeout * self.poll_factor):
d = self._data.get(tag)
if d is not None and d.fresh:
return d
else:
time.sleep(step)
return None
# Convert boolean to Y/N for commands
def _format_yn(self, value):
if value is None:
return None
if value:
return 'Y'
else:
return 'N'
# Convert an integer into a hex string
def _format_hex(self, num, digits=8):
return "0x{:0{digits}x}".format(num, digits=digits)
# Check if an event is a valid value
def _check_valid_event(self, event, allow_none=True):
enum = ['time', 'summation', 'billing_period', 'block_period',
'message', 'price', 'scheduled_prices', 'demand']
if allow_none:
enum.append(None)
if event not in enum:
raise ValueError('Invalid event specified')
# The following are convenience methods for sending commands. Commands
# can also be sent manually using the generic issue_command method.
#################################
# Raven Commands #
#################################
def restart(self):
return self.issue_command('restart')
# Dangerous! Will decommission device!
def factory_reset_warning_dangerous(self):
return self.issue_command('factory_reset')
def get_connection_status(self):
return self.issue_command('get_connection_status', return_class=response_entities.ConnectionStatus)
def get_device_info(self):
return self.issue_command('get_device_info', return_class=response_entities.DeviceInfo)
def get_schedule(self, mac=None, event=None):
self._check_valid_event(event)
opts = {'MeterMacId': mac, 'Event': event}
return self.issue_command('get_schedule', opts, return_class=response_entities.ScheduleInfo)
def set_schedule(self, mac=None, event=None, frequency=10, enabled=True):
self._check_valid_event(event, allow_none=False)
opts = {
'MeterMacId': mac,
'Event': event,
'Frequency': self._format_hex(frequency),
'Enabled': self._format_yn(enabled)
}
return self.issue_command('set_schedule', opts)
def set_schedule_default(self, mac=None, event=None):
self._check_valid_event(event)
opts = {'MeterMacId': mac, 'Event': event}
return self.issue_command('set_schedule_default', opts)
def get_meter_list(self):
return self.issue_command('get_meter_list', return_class=response_entities.MeterList)
##########################
# Meter Commands #
##########################
def get_meter_info(self, mac=None):
opts = {'MeterMacId': mac}
return self.issue_command('get_meter_info', opts, return_class=response_entities.MeterInfo)
def get_network_info(self):
return self.issue_command('get_network_info', return_class=response_entities.NetworkInfo)
def set_meter_info(self, mac=None, nickname=None, account=None, auth=None, host=None, enabled=None):
opts = {
'MeterMacId': mac,
'NickName': nickname,
'Account': account,
'Auth': auth,
'Host': host,
'Enabled': self._format_yn(enabled)
}
return self.issue_command('set_meter_info', opts)
############################
# Time Commands #
############################
def get_time(self, mac=None, refresh=True):
opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
return self.issue_command('get_time', opts, return_class=response_entities.TimeCluster)
def get_message(self, mac=None, refresh=True):
opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
return self.issue_command('get_message', opts, return_class=response_entities.MessageCluster)
def confirm_message(self, mac=None, message_id=None):
if message_id is None:
raise ValueError('Message id is required')
opts = {'MeterMacId': mac, 'Id': self._format_hex(message_id)}
return self.issue_command('confirm_message', opts)
#########################
# Price Commands #
#########################
def get_current_price(self, mac=None, refresh=True):
opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
return self.issue_command('get_current_price', opts, return_class=response_entities.PriceCluster)
# Price is in cents, w/ decimals (e.g. "24.373")
def set_current_price(self, mac=None, price="0.0"):
parts = price.split(".", 1)
if len(parts) == 1:
trailing = 2
price = int(parts[0])
else:
trailing = len(parts[1]) + 2
price = int(parts[0] + parts[1])
opts = {
'MeterMacId': mac,
'Price': self._format_hex(price),
'TrailingDigits': self._format_hex(trailing, digits=2)
}
return self.issue_command('set_current_price', opts)
###############################
# Simple Metering Commands #
###############################
def get_instantaneous_demand(self, mac=None, refresh=True):
opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
return self.issue_command('get_instantaneous_demand', opts, return_class=response_entities.InstantaneousDemand)
def get_current_summation_delivered(self, mac=None, refresh=True):
opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
return self.issue_command('get_current_summation_delivered', opts, return_class=response_entities.CurrentSummationDelivered)
def get_current_period_usage(self, mac=None):
opts = {'MeterMacId': mac}
return self.issue_command('get_current_period_usage', opts, return_class=response_entities.CurrentPeriodUsage)
def get_last_period_usage(self, mac=None):
opts = {'MeterMacId': mac}
return self.issue_command('get_last_period_usage', opts, return_class=response_entities.LastPeriodUsage)
def close_current_period(self, mac=None):
opts = {'MeterMacId': mac}
return self.issue_command('close_current_period', opts)
def set_fast_poll(self, mac=None, frequency=4, duration=20):
opts = {
'MeterMacId': mac,
'Frequency': self._format_hex(frequency, digits=4),
'Duration': self._format_hex(duration, digits=4)
}
return self.issue_command('set_fast_poll', opts)
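# ---------------------------------------------------------------------------
# Minimal usage sketch, kept in comments so importing the package has no side
# effects. The device path is an assumption; pass whatever node your EMU-2
# actually enumerates as (see _DEFAULT_DEVICE above).
#
# from emu_power import Emu
# emu = Emu(synchronous=True, timeout=10)
# if emu.start_serial('/dev/ttyACM0'):
#     demand = emu.get_instantaneous_demand()   # returns data or None on timeout
#     if demand is not None:
#         print(demand)
#     emu.stop_serial()
# ---------------------------------------------------------------------------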
|
wsdump.py
|
#!/Users/nimblesixthousand/Desktop/apps/raiden/raiden_project/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
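# Illustrative usage (editor's note, not part of the original script): with the parser
# above, an invocation such as
#     python wsdump.py ws://echo.websocket.org/ -t "hello" -vv --timings
# sends "hello" right after connecting, enables websocket trace output (verbose level 2),
# and prefixes each received message with the seconds elapsed since startup.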
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
test_autograd.py
|
import gc
import sys
import io
import math
import random
import tempfile
import time
import threading
import unittest
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, format_time, EventList,
FunctionEvent, FunctionEventAvg,
record_function, emit_nvtx)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
load_tests,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TEST_WITH_ROCM, disable_gc,
gradcheck, gradgradcheck, make_tensor)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import (
unpack_variables,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf, skipMeta)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import pickle
PRECISION = 1e-4
def graph_desc(fn):
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
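# Illustrative example (editor's note, not part of the original file): for c = a + b
# where a and b are leaf tensors with requires_grad=True, graph_desc(c.grad_fn) returns
# a string of the form 'AddBackward0(AccumulateGrad(), AccumulateGrad())': the grad_fn
# type name followed by its recursively formatted next_functions.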
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
        # Test that undefined tensors returned from a custom backward function
        # are propagated as undefined and not as a tensor full of zeroes
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
        # test to ensure grad(grad)check runs successfully even if there is an
        # unrelated (but differentiable) input
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
mat = torch.randn(2, 3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.mv, choose a different op
res = torch.mv(mat, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
# Accumulate out-of-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulate dense gradient to sparse gradient will change the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
@skipIfNoLapack
def test_slogdet_sign(self):
a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
s, logdet = a.slogdet()
# test that sign should not require grad
self.assertFalse(s.requires_grad)
# test that backward through computation involving sign works
def sign_mul_logdet(mat):
s, logdet = mat.slogdet()
return s * logdet
u, s, v = a.detach().svd()
s.abs_().clamp_(0.0001)
for sign in (-1, 1):
s[-1] = sign
mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
gradcheck(sign_mul_logdet, mat)
gradgradcheck(sign_mul_logdet, mat)
def test_sum_to_with_empty_dim_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
            self.fail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
        # Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different than the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backwards
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
        # backward doesn't have an allow_unused flag, so the behavior of backward
        # when a variable is not part of the graph is as if allow_unused were True:
        # z.grad will simply be None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
        # advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
        # advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
try:
prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
finally:
torch.set_warn_always(prev)
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
        # Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
        # w is the last expression, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should be still usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. C++
        # implemented functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
x = torch.ones(2, 3)
self.assertTrue(x.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
# case where x broadcasts to match y[1]; the broadcast is summed out in backward, so x.grad keeps x's shape
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
# TODO: opinfo this or move to unbind's test suite
def test_unbind(self):
stacked = torch.randn(3, 10, 10, requires_grad=True)
x, y, z = stacked.unbind()
grad = torch.randn(3, 10, 10)
torch.autograd.backward([x, y, z], grad.unbind())
self.assertEqual(stacked.grad, grad)
# check that it works with only one gradient provided (#9977)
for i in range(3):
stacked = torch.randn(3, 10, 10, requires_grad=True)
outs = stacked.unbind()
gi = grad.unbind()[i]
g, = torch.autograd.grad(outs[i], stacked, gi)
g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
for j in range(3)], dim=0)
self.assertEqual(g, g_expected)
# TODO: opinfo this or move to fill's test suite
def test_fill(self):
root = torch.randn(4, 5, requires_grad=True)
def func(root):
x = root.clone()
x.fill_(2)
return x
gradcheck(func, [root])
gradgradcheck(func, [root])
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
# After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
# (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and distinct test case worth enshrining. mult1 isn't testing anything
# particularly interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# This test failed, complaining that buffers had already been freed,
# prior to #22983. Also a pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient (detach() cuts the 2 * x path through y,
# so x.grad is 1 instead of 3), but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
# in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
# This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
# so the gradients needed to be summed. The engine used to keep a need_copy
# set of tensors that would need a clone upon the next addition, and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from the need_copy set.
# 2. When accumulating in the second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in the 3rd place).
# 3. When accumulating in the third place, it wasn't in the need_copy set
# either, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
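# A quick sanity derivation of those numbers (added sketch, not part of the
# original reasoning): after the loop, a, b and c have each been scaled by
# 2**4 = 16, so `branch = a + b + c` passes a gradient of 16 back through each
# of them, while add2 = (a + b) + c was built from the pre-loop a, b, c and
# passes a gradient of 1 through each. Since a and c both come from x and b
# comes from y:
#   x.grad = 16 + 16 (branch via a, c) + 1 + 1 (add2 via a, c) = 34
#   y.grad = 16 (branch via b) + 1 (add2 via b) = 17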
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
# TODO: Create OpInfos for these ops
def test_broadcast_tensors(self):
f_args_variable = (torch.randn(3, dtype=torch.double, requires_grad=True),
torch.randn(1, 2, 1, dtype=torch.double, requires_grad=True),
torch.randn(1, 1, dtype=torch.double, requires_grad=True),
torch.randn(5, 1, 1, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_broadcast_tensors", "broadcast",
lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
True, f_args_variable, f_args_tensor)
def test_block_diag(self):
f_args_variable = (torch.randn(1, S, dtype=torch.double, requires_grad=True),
torch.randn(2, S, dtype=torch.double, requires_grad=True),
torch.randn(3, S, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_block_diag", "block_diag",
lambda a, b, c: torch.block_diag(a, b, c),
True, f_args_variable, f_args_tensor)
def test_cat(self):
f_args_variable = (torch.randn(1, S, S, dtype=torch.double, requires_grad=True),
torch.randn(2, S, S, dtype=torch.double, requires_grad=True),
torch.randn(3, S, S, dtype=torch.double, requires_grad=True),
0)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_1(self):
f_args_variable = (torch.randn(S, S, 1, dtype=torch.double, requires_grad=True),
torch.randn(S, S, 2, dtype=torch.double, requires_grad=True),
torch.randn(S, S, 3, dtype=torch.double, requires_grad=True),
-1)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_1", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_2(self):
f_args_variable = (torch.randn(S, 1, S, dtype=torch.double, requires_grad=True),
torch.randn(S, 2, S, dtype=torch.double, requires_grad=True),
torch.randn(S, 3, S, dtype=torch.double, requires_grad=True),
-2)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_2", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_empty_legacy(self):
f_args_variable = (torch.randn(0, dtype=torch.double, requires_grad=True),
torch.randn(S, S, dtype=torch.double, requires_grad=True))
# gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
# hence False is passed below, but gradcheck is run explicitly.
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty_legacy", "cat",
lambda a, b: torch.cat((a, b)),
False, f_args_variable, f_args_tensor, check_forward_ad=True)
self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))
def test_cat_empty(self):
f_args_variable = (torch.randn(0, S, dtype=torch.double, requires_grad=True),
torch.randn(S, S, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty", "cat",
lambda a, b: torch.cat((a, b)),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause nondeterministic behavior
# when it comes to the sign of an eigenvector
# (note that if v is an eigenvector, so is -v),
# hence we eliminate this nondeterminism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck changes the content of the input by +/- eps (defaults to 1e-06)
# to compute the numerical gradient, which can also cause the signs to flip.
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
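# A tiny illustration of the sign fix above (added example, assuming a
# single 2-element eigenvector column): for U[:, j] = [-0.8, 0.6] the
# max-magnitude entry is -0.8, so sign = -1 and the column becomes
# [0.8, -0.6]; if LOBPCG had instead returned [0.8, -0.6], the sign would
# be +1 and the column would be left unchanged, so both runs agree.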
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = A.matmul(A.transpose(-1, -2)) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# nondeterminism in lobpcg's forward.
# Note that this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.transpose(-1, -2))
# the tests below take about 1-2 minutes to finish,
# but we want to be extra sure that the backward is correct.
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
def test_maximum_and_minimum_subgradient(self):
def run_test(f, a, b, expected_a_grad, expected_b_grad):
a = torch.tensor(a, requires_grad=True)
b = torch.tensor(b, requires_grad=True)
z = f(a, b)
z.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
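# The expected values below follow the subgradient convention for
# elementwise max/min (my reading of these expectations, hedged): where the
# two inputs tie, e.g. a = b = 1. in the middle entry, the unit incoming
# gradient is split evenly, 0.5 to each input; elsewhere the full gradient
# goes to the larger input (maximum) or the smaller one (minimum).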
run_test(torch.maximum, [0., 1., 2.], [1., 1., 1.], [0., 0.5, 1.], [1., 0.5, 0.])
run_test(torch.minimum, [0., 1., 2.], [1., 1., 1.], [1., 0.5, 0.], [0., 0.5, 1.])
# TODO: norm is deprecated, update these tests and port them to OpInfos
# or test_linalg.py
def test_norm_subgradient(self):
def run_test(input_size, norm_deg):
input = torch.zeros(*input_size, requires_grad=True)
input.norm(norm_deg).backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), 2)
run_test((10, 10), 2)
run_test((10,), 3)
run_test((10,), 1)
run_test((10,), 1.5)
run_test((10,), inf)
def test_norm_inf_subgradient(self):
def run_test(input, expected, dim=None):
x = torch.tensor(input, requires_grad=True)
out = x.norm(inf, dim=dim, keepdim=True)
out.backward(torch.ones(out.size()))
self.assertEqual(x.grad, expected)
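# The expected gradients below reflect how the inf-norm subgradient is
# distributed (my reading, hedged): the incoming gradient is split evenly
# across all entries whose absolute value attains the max over the reduced
# dimensions, and every other entry gets zero; e.g. [1., 0., 1.] has two
# max entries, so each receives 0.5.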
run_test([0., 0., 0.], [0., 0., 0.])
run_test([1., 0., 1.], [0.5, 0., 0.5])
run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))
def test_sinc(self):
# The derivative of sinc(x) at x=0 has to be special cased.
# A naive computation will result in 0/0 -> NaN.
# We also need to be careful when we are very close to 0, as the
# derivative's denominator is squared, and there are some floats
# that are positive and whose squares are zero.
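# For reference (added note, hedged): torch.sinc is the normalized sinc,
# sinc(x) = sin(pi*x) / (pi*x) with sinc(0) = 1, so for x != 0
#   d/dx sinc(x) = (cos(pi*x) - sinc(x)) / x
# and the special-cased derivative at x = 0 is 0.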
a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
dtype=torch.double,
requires_grad=True)
gradcheck(torch.sinc, a)
def test_igamma(self):
# 1e-3 offset to avoid zeros
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_igammac(self):
# 1e-3 offset to avoid zeros in s
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double)).requires_grad_()
gradcheck(torch.igammac, (s, x))
gradgradcheck(torch.igammac, (s, x))
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add, aten::sum to have the sequence numbers,
# expecting the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
# a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able to
# propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ...]], ...]
# Using a list instead of a dict so the order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for interval in ranges:
assert len(interval) == 3
events.append(
FunctionEvent(
id=interval[2],
node_id=0,
name="",
thread=thread,
start_us=interval[0],
end_us=interval[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
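# Walking through the expectation (added sketch of my understanding of
# _populate_cpu_children, hedged): on thread 1, (0, 1) and (1, 2) do not
# nest, so ids 1 and 2 have no children; on thread 0, (1, 3) is not
# contained in (0, 2), which pops (0, 2) off the stack, and (1, 2) then
# nests inside (1, 3), so only id 5 gets a child, namely id 4.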
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
self.assertEqual([get_children_ids(event) for event in events], res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
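# Expected numbers, worked out (added sketch): the two events last
# 15 - 10 = 5us and 30 - 20 = 10us, giving count 2 and cpu_time_total 15us;
# avg.add(avg) then doubles both, so count == 4, cpu_time_total == 30us,
# and the per-call average cpu_time == 30 / 4 == 7.5us.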
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0  # convert to microseconds, the profiler's default unit
print(
"Total time based on python measurements: ",
format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate arbitrary function
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
for key in ['real', 'imag']:
self.assertRaises(RuntimeError, lambda: hasattr(x, key))
self.assertTrue(hasattr(y, key))
keys.remove(key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_as_strided(self):
def test(x, prepro_fn, size, strides, offset=None):
x = x.to(torch.double).detach().requires_grad_()
# Check that forward will **not** resize storage because it may
# cause NaN in output and fail numerical Jacobian check consequently
with torch.no_grad():
y = prepro_fn(x) if prepro_fn is not None else x
max_offset = sum((si - 1) * st for si, st in zip(size, strides))
max_offset += offset if offset is not None else y.storage_offset()
assert max_offset < len(y.storage()), "test case resizes storage"
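# Worked example (added sketch) for the first call below: size [3, 3],
# strides [6, 2], offset 2 on a 25-element storage touches at most element
# 2 + (3 - 1) * 6 + (3 - 1) * 2 = 18 < 25, so no resize is needed.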
def closure(x):
if prepro_fn is not None:
x = prepro_fn(x)
return x.as_strided(size, strides, offset)
gradcheck(closure, [x])
gradgradcheck(closure, [x])
# test
test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)
# test crazy stride at dim with size 1 case
test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)
# test expand case
test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)
# test non-expand overlapping case
test(torch.randn(35), None, [6, 6], [5, 1], 2)
test(torch.randn(15), None, [3, 2], [3, 6], 2)
# test transpose case
test(torch.randn(3, 4), None, [4, 3], [1, 4])
# test "getting things outside the input" case
x = torch.randn(6, 2)
test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros
self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])
# test select on expanded input case
test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
# TODO: see if these tests can be ported to OpInfos or moved to
# test_tensor_creation_ops.py
def _test_lerp_tensor_weights(self, cast):
def construct_inputs(*shapes):
start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_()
end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_()
weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_()
return [start, end, weight]
all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting
((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1
((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1
((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1
((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2
((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2
((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2
((3, 3), (3, 3, 3), (3,))] # all broadcasting
for shapes in all_test_shapes:
cur_inputs = construct_inputs(*shapes)
gradcheck(torch.lerp, cur_inputs)
gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
self._test_lerp_tensor_weights(lambda t: t)
# TODO: see if these tests can be moved to OpInfos or test_reductions.py
def test_reduce_dtype(self):
def test_reduction(op, has_no_dim, takes_dtype=True):
x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
if has_no_dim:
grad1, = torch.autograd.grad([op(x)], [x])
grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
if takes_dtype:
grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
else:
grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
test_reduction(torch.sum, True)
test_reduction(torch.prod, True)
test_reduction(torch.cumsum, False)
test_reduction(torch.cumprod, False)
test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape), create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
# TODO: see if these tests can be moved to OpInfo or test_binary_ufuncs.py
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this because diagonal_backward uses in-place
# operations and gradgradcheck does not catch whether
# they work as expected (it will succeed even if
# the gradient has requires_grad == False)
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't throw warnings if there is an error
# but we'd want to at least see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that when grad goes out of scope at the end of this function, the PyObject is destroyed.
# We can test this by checking that Foo is not kept alive once t is destroyed
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFunc2Backward -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that as well?
# The answer is that the custom function's PyObject (THPFunction) actually only holds
# a weak reference to the C++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_svd_no_singularvectors(self):
A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
u, s, v = torch.svd(A, compute_uv=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
        # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
        # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
        # non-contiguous indices and values, so we should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
        # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
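    # Illustrative sketch (assumption: BadMul and the helper name are made up for this example
    # only): gradcheck compares the analytical Jacobian produced by backward() against a
    # finite-difference estimate, so a custom Function with a wrong backward is expected to fail it.
    def _sketch_gradcheck_catches_wrong_backward(self):
        class BadMul(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 5
            @staticmethod
            def backward(ctx, gO):
                return gO * 4  # wrong on purpose: the correct gradient is gO * 5
        x = torch.rand(3, dtype=torch.float64, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch'):
            gradcheck(BadMul.apply, (x,))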
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
            # when the layout is not mkldnn (aka has strides) and the input has a dimension with stride 0
            # (always raises even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
        # when inputs are mkldnn tensors, forward-mode testing is not allowed
        # Update the tolerances below to make sure the gradients match even in single-precision floats
        # Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
            # when outputs are sparse (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
            # when outputs are mkldnn (always raises even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
            # runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
            # when a runtime error is encountered while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
            self.assertTrue(gradcheck(fn2, (b, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
        self.assertTrue(gradcheck(fn2, (c,)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian wrt the first output
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
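    # Illustrative sketch (assumption: the helper name is made up): the supported public way to
    # compute the same quantity is torch.autograd.functional.jacobian, which returns one
    # Jacobian per input for the single output of `fn`.
    def _sketch_functional_jacobian(self):
        def fn(x, y):
            return 2 * x + y
        a = torch.rand(2, 2, dtype=torch.float64)
        b = torch.rand(2, 2, dtype=torch.float64)
        ja, jb = torch.autograd.functional.jacobian(fn, (a, b))
        self.assertEqual(ja.reshape(4, 4), 2 * torch.eye(4, dtype=torch.double))
        self.assertEqual(jb.reshape(4, 4), torch.eye(4, dtype=torch.double))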
def test_gradcheck_get_analytical_jacobian(self):
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
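    # Illustrative sketch (assumption: the helper name is made up): the forward-mode path that
    # check_forward_ad exercises boils down to the dual-number API below, where the tangent
    # carried by a dual tensor is transformed by the op's JVP rule.
    def _sketch_forward_ad_jvp(self):
        primal = torch.rand(2, dtype=torch.double)
        tangent = torch.rand(2, dtype=torch.double)
        with fwAD.dual_level():
            dual = fwAD.make_dual(primal, tangent)
            out = dual * 3
            _, jvp = fwAD.unpack_dual(out)
            self.assertEqual(jvp, 3 * tangent)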
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
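    # Illustrative sketch (assumption: the helper name is made up): detach() returns a tensor
    # that shares storage and the version counter with the original, so an in-place op on the
    # detached tensor bumps the version seen by the original, mirroring the view case above.
    def _sketch_version_counter_detach(self):
        x = torch.randn(1, 2)
        y = x.detach()
        x_saved_version = x._version
        y.add_(1)
        self.assertTrue(x._version > x_saved_version)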
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
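    # Illustrative sketch (assumption: `run` and the helper name are made up): checkpointing
    # trades memory for compute by discarding intermediate activations and re-running the
    # wrapped function during backward, so it is called twice per forward/backward pass.
    def _sketch_checkpoint_recomputes_forward(self):
        calls = [0]
        def run(x):
            calls[0] += 1
            return x * 2
        inp = torch.rand(2, requires_grad=True)
        out = checkpoint(run, inp)
        self.assertEqual(calls[0], 1)  # forward ran once, without saving activations
        out.sum().backward()
        self.assertEqual(calls[0], 2)  # backward re-ran the forward to rebuild the graph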
def test_checkpoint_valid_reset_on_error(self):
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
def test_callback_adds_callback(self):
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
        # Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10:optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10:optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10:optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
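    # Illustrative sketch (assumption: the helper name is made up): register_hooks can swap the
    # saved tensor for an arbitrary packed object, as long as the unpack hook hands back an
    # equivalent tensor when the backward pass needs it.
    def _sketch_saved_tensor_pack_unpack(self):
        a = torch.ones(2, 2, requires_grad=True)
        out = a * a
        # pack stores a detached copy; unpack returns it unchanged
        out.grad_fn._raw_saved_self.register_hooks(lambda x: x.detach().clone(), lambda x: x)
        out.sum().backward()
        self.assertEqual(a.grad, 2 * torch.ones(2, 2))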
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
        # We can't tell whether the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(1, 1)
saved[0].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "already been set"):
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
        # This is not necessarily the absolutely correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
        # This is not necessarily the absolutely correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
                        # never modify a, b inplace for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertTrue(torch.allclose(non_inplace_grad, inplace_grad))
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
        # This is not necessarily the absolutely correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
        # This checks that multiple views in the forward are properly traced and how they
        # behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
        # This is not necessarily the absolutely correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks custom autograd.Function that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2, dtype=torch.double)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
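    # Illustrative sketch (assumption: the helper name is made up): nansum treats NaN entries
    # as zero, and its backward masks them out, so NaN positions receive zero gradient while
    # every other entry gets the usual gradient of one.
    def _sketch_nansum_grad_masks_nan(self):
        a = torch.tensor([1.0, float('nan'), 3.0], requires_grad=True)
        a.nansum().backward()
        self.assertEqual(a.grad, torch.tensor([1.0, 0.0, 1.0]))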
def test_nansum_dtype(self):
inp = torch.randn(2, 2, 2, 2)
with torch.no_grad():
inp[inp < 0.2] = float('nan')
def test(inp, inp_dtype, out_dtype):
with torch.no_grad():
a = inp.to(inp_dtype)
a.requires_grad = True
            b = torch.nansum(a, dtype=out_dtype)
b.backward()
self.assertEqual(a.dtype, a.grad.dtype)
test(inp, torch.float, torch.double)
test(inp, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3, dtype=torch.double)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement the backward"):
BadBw.apply(inp).sum().backward()
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original(self):
def test(get_input, is_leaf):
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
self.assertEqual(a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(2 * a, a.grad)
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
self.assertEqual(2 * a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(3 * a, a.grad)
# double backward
a = get_input()
grad_fn = a.grad_fn
y = a ** 3
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
g.sum().backward()
else:
g.sum().backward()
self.assertEqual(6 * a, a.grad)
a = get_input()
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
print(y.grad_fn._saved_self)
def inplace_double(x):
x *= 2
return x
a = get_input()
t = a * a
t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
y = t * 2
with self.assertRaisesRegex(
RuntimeError,
"one of the variables needed for gradient computation has been modified by an inplace operation"):
y.sum().backward()
# leaf
test(lambda: torch.randn(5, requires_grad=True), True)
# not leaf, not output
test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original(self):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
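# Illustrative sketch, not part of the upstream suite: the _raw_saved_*
# register_hooks API exercised above can be used, for example, to offload a
# saved tensor at pack time and bring it back at unpack time. This relies on
# the same underscored (internal) attributes the tests above rely on.
def _example_saved_tensor_offload_hooks():
    import torch
    a = torch.randn(5, requires_grad=True)
    y = a * a
    y.grad_fn._raw_saved_self.register_hooks(
        lambda t: t.cpu(),         # pack: applied to the saved tensor
        lambda t: t.to(a.device))  # unpack: applied when backward retrieves it
    y.sum().backward()
    assert torch.allclose(a.grad, 2 * a)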
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
def gradgradcheck_method_precision_override(test_name):
# These are empirical observations only; they should be improved over time.
gradgradcheck_precision_override = {
'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
}
non_broadcasted_test_name = test_name.split("_broadcast")[0]
override = gradgradcheck_precision_override.get(non_broadcasted_test_name)
if override:
if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
# errors accumulated across 1 dimension
override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S}
elif 'broadcast_all' in test_name:
# errors accumulated across multiple dimensions
override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S}
return override
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
input_variables, run_gradgradcheck=True, check_batched_grad=True,
check_forward_ad=False):
test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
check_batched_grad=check_batched_grad, check_forward_ad=check_forward_ad))
gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name)
if gradgradcheck_precision_override is not None:
atol = gradgradcheck_precision_override['atol']
rtol = gradgradcheck_precision_override['rtol']
test_case.assertTrue(gradgradcheck(apply_method, input_variables, None, atol=atol, rtol=rtol,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
else:
test_case.assertTrue(gradgradcheck(apply_method, input_variables,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
f_args_variable, f_args_tensor, *, check_forward_ad=False):
output_variable = apply_fn(*f_args_variable)
if run_grad_checks:
run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
output_variable, f_args_variable, check_forward_ad=check_forward_ad)
self_variable = f_args_variable[0]
if isinstance(output_variable, torch.Tensor) and output_variable.requires_grad and self_variable is not None:
output_variable.backward(torch.randn_like(output_variable))
test_case.assertEqualTypeString(self_variable, self_variable.grad)
test_case.assertEqual(self_variable.size(), self_variable.grad.size())
class TestAutogradComplex(TestCase):
def test_view_func_for_complex_views(self):
# case 1: both parent and child have view_func
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
x0 = x.clone()
x1 = torch.view_as_complex(x0)
x2 = torch.view_as_real(x1)
x2.mul_(2)
x2.sum().backward()
y0 = y.clone()
y0.mul_(2)
y0.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 2: parent has view_func but child does not
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a):
b = a.clone()
b1 = torch.view_as_complex(b)
b2 = b1.reshape(b1.numel())
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 3: parent does not have a view_func but child does
x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a, dim0_size=5):
b = a.clone()
b1 = b.reshape(dim0_size, 2)
b2 = torch.view_as_real(b1)
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
def test_view_with_multi_output(self):
x = torch.randn(2, 2, 2, dtype=torch.double)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
x.requires_grad_(True)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
def test_as_identity(self):
# view_as_real and view_as_complex should behave like an identity
def func(z):
z_ = torch.view_as_complex(z)
z_select = torch.select(z_, z_.dim() - 1, 0)
z_select_real = torch.view_as_real(z_select)
return z_select_real.sum()
z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
gradcheck(func, [z])
func(z).backward()
z1 = z.clone().detach().requires_grad_(True)
torch.select(z1, z1.dim() - 2, 0).sum().backward()
self.assertEqual(z.grad, z1.grad)
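# Illustrative sketch, not part of the upstream suite: view_as_complex and
# view_as_real are inverses of each other, which is why treating the pair as
# an identity in the test above is sound.
def _example_view_as_complex_roundtrip():
    import torch
    x = torch.randn(10, 2, dtype=torch.double)
    z = torch.view_as_complex(x)   # shape (10,), dtype complex128
    assert z.shape == (10,) and z.dtype == torch.complex128
    assert torch.equal(torch.view_as_real(z), x)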
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should both be Tensors or tuples of Tensors with matching sizes
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(el_res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
def test_vjp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
def test_vjp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = torch.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
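# Illustrative sketch, not part of the upstream suite: vjp performs the same
# contraction as torch.autograd.grad with grad_outputs=v, which is a handy way
# to read the tests above.
def _example_vjp_equals_grad_with_grad_outputs():
    import torch
    def reducer(x):
        return x.sum(dim=1)
    inp = torch.rand(4, 4, requires_grad=True)
    v = torch.rand(4)
    _, vjp_val = torch.autograd.functional.vjp(reducer, inp, v)
    grad_val, = torch.autograd.grad(reducer(inp), inp, grad_outputs=v)
    assert torch.allclose(vjp_val, grad_val)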
def test_jvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
def test_jvp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jvp(foo, inp, v, strict=True)
res = autogradF.jvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.jvp(bar, inp, v, strict=True)
res = autogradF.jvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.ones(2), torch.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, 2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
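# Illustrative sketch, not part of the upstream suite: for inputs with numels
# (2, 3), a standard basis is just the 5x5 identity split column-wise into a
# (5, 2) and a (5, 3) block, which is what the check above reassembles with
# torch.cat.
def _example_standard_basis_blocks():
    import torch
    numels = [2, 3]
    total = sum(numels)
    blocks = torch.eye(total).split(numels, dim=1)
    assert [b.shape for b in blocks] == [torch.Size([5, 2]), torch.Size([5, 3])]
    assert torch.equal(torch.cat(blocks, dim=1), torch.eye(total))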
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
test_cases = [
(torch.randn(2), torch.randn(3, device='cuda')),
(torch.randn(3, device='cuda'), torch.randn(2)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a python warning. This test checks that
# autogradF.{jacobian, hessian} don't raise that experimental prototype
# warning; a public-facing API should not surface that warning regardless of
# how it is called.
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
res = autogradF.jacobian(bar, inp, vectorize=vectorize)
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(inp), inp)
def foo(a, b):
return b, 3 * a.narrow(0, 0, 3)
inp = (torch.rand(4), torch.rand(5))
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
return self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
def test_jacobian_no_grad(self):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs)
self.assertIsNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
self.assertIsNotNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNone(res.grad_fn)
def identity(x):
return x.clone()
inputs = torch.rand(4)
res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, identity(inputs), inputs)
self.assertIsNone(res.grad_fn)
self.assertEqual(res, torch.eye(4))
def add_exp_reducer(x, y):
return (x + y.exp()).sum(dim=1)
inputs = (torch.rand(4, 4), torch.rand(4, 4))
res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
def _test_jacobian_create_graph(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_exp_reducer(x, y):
return (x + y).exp().sum(dim=1)
inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
torch.rand(4, 4, dtype=torch.double, requires_grad=True))
res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def foo(x, y):
x = x.cos()
val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
expected = autogradF.jacobian(f, inputs, vectorize=False)
result = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_zero_dim(self):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([])
y = torch.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
def f(x, y):
return x * y, (x * y).cuda()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.randn([])
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
def _test_hessian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
def bar3(a):
return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
res = autogradF.hessian(bar, inp, vectorize=vectorize)
err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hessian(bar2, inp, vectorize=vectorize)
with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
res = autogradF.hessian(bar3, inp, vectorize=vectorize)
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hessian(foo, inp, strict=True)
res = autogradF.hessian(foo, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
res = autogradF.hessian(bar, inp, strict=True)
res = autogradF.hessian(bar, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hessian(bar2, inp, strict=True)
res = autogradF.hessian(bar2, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
def test_hessian_no_grad(self):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res.grad_fn)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2), torch.rand(2, 2))
res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
def test_hessian_output(self):
self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = torch.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
torch.rand(2, 2, dtype=torch.double, requires_grad=True))
res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
def flatten(inp):
return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
def foo(x, y):
x = x.cos()
val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
res = autogradF.vhp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
res = autogradF.vhp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.vhp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.vhp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
res = autogradF.vhp(foo, inp, (v, 2))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vhp(foo, inp, v, strict=True)
res = autogradF.vhp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vhp(bar, inp, v, strict=True)
res = autogradF.vhp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.vhp(bar2, inp, v, strict=True)
res = autogradF.vhp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, vhp_val = autogradF.vhp(bar, inputs, v)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vhp_val[0].grad_fn)
self.assertIsNone(vhp_val[1].grad_fn)
def test_vhp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vhp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.vhp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_vhp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.vhp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(vhp_val[0].grad_fn)
self.assertIsNotNone(vhp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hvp(foo, inp, v, strict=True)
res = autogradF.hvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.hvp(bar, inp, v, strict=True)
res = autogradF.hvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hvp(bar2, inp, v, strict=True)
res = autogradF.hvp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, hvp_val = autogradF.hvp(bar, inputs, v)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(hvp_val[0].grad_fn)
self.assertIsNone(hvp_val[1].grad_fn)
def test_hvp_scalar(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.hvp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.exp().sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.hvp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_hvp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.hvp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(hvp_val[0].grad_fn)
self.assertIsNotNone(hvp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
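# Illustrative sketch, not part of the upstream suite: for a twice
# differentiable scalar function, the same Hessian-vector product can be
# obtained by hand with two torch.autograd.grad calls (double backward).
def _example_hvp_via_double_backward():
    import torch
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    inputs = torch.rand(4, requires_grad=True)
    v = torch.rand(4)
    grad, = torch.autograd.grad(foo(inputs), inputs, create_graph=True)
    hvp_manual, = torch.autograd.grad(torch.dot(grad, v), inputs)
    hvp_functional = torch.autograd.functional.hvp(foo, inputs, v)[1]
    assert torch.allclose(hvp_manual, hvp_functional)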
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
# Make sure that the dual level does not keep the c++
# version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
# The following tests ensure that:
# - the default level system in the python binding works
# - only level 0 exists and nesting is properly disabled
# - printing works fine
# - basic packing/unpacking works
# - advanced packing/unpacking works
#   - for memory / version counter sharing
#   - for backward AD (regular ops)
# - view + inplace for both modes work fine
# - we do proper cleanup on exit of a level
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same python
# object, feel free to relax in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual
# All the inplace operations here are expected to update the primal value of the Tensors but
# not always their tangents.
# Also, all mentions of "non differentiable view" here mean non-forward-differentiable view
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
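# In short (added note): make_dual returns a forward non-differentiable view of the primal,
# so an in-place op on the dual updates the primal's values but does not attach a tangent
# to the primal itself -- which is what the assertions below verify.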
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops do update differentiable view but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
# Updates through a view should be tracked as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_cdist(self, device):
def _test_euclidean_large_cdist(sizex, sizey=None):
if sizey is None:
sizey = sizex
x = torch.randn(sizex, device=device, dtype=torch.float)
y = torch.randn(sizey, device=device, dtype=torch.float)
eps = 1e-6
# to avoid extremum
x = x - (((x - y) < eps).float() * 2 * eps)
x.requires_grad = True
y.requires_grad = True
dist = torch.cdist(x, y, p=2)
# Do a backward pass to check that it is valid for large
# matrices
loss = dist.sum()
loss.backward()
_test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
x = torch.randn(1, 2, device=device)
y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
x.requires_grad = True
y.requires_grad = True
result = torch.cdist(x, y, p=p)
result.backward(torch.ones_like(result))
self.assertFalse(torch.isnan(x.grad).any())
self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
# Test to detect issues in cdist gradient calculation
# when the distances are 0
sizex = (1, 27, 32)
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.float)
dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
y = x.clone()
eps = 1e-6
x.requires_grad = True
d = torch.cdist(x, y)
d.backward(dist_grad)
# Check that the backward pass does not contain invalid
# values such as nan or inf
assert torch.isfinite(x.grad).all()
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
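# A hedged sketch of how a sparse input could be checked directly instead (check_sparse_nnz
# is the gradcheck flag named in the error message above; treat the exact call as illustrative):
#   gradcheck(fn, (sparse_input,), check_sparse_nnz=True, check_batched_grad=False)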
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
# Parent graph is a cpu graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
before = CudaMemoryLeakCheck.get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for the autograd thread to clean up failed tasks.
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
# TODO: opinfo pdist
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
# TODO: see if these tests can be ported to OpInfos or moved to torch.where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
batch_size = 64
num_labels = 101
target_length = 15
gradcheck_input_size = 10
ZERO_NONE = 0
ZERO_SOME = 1
ZERO_ALL = 2
# input_length, vary_lengths, zero_lengths
tests = [(150, False, ZERO_NONE),
(150, True, ZERO_NONE),
(50, True, ZERO_SOME),
(50, True, ZERO_ALL)]
if 'cuda' in device:
tests += [(50, False, ZERO_NONE),
(50, True, ZERO_NONE),
(150, True, ZERO_SOME),
(150, True, ZERO_ALL)]
for input_length, vary_lengths, zero_mode in tests:
targets = torch.randint(1, num_labels, (batch_size, target_length),
device=device, dtype=torch.long)
x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
device=device)
input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
if zero_mode == ZERO_ALL:
target_lengths = [0 for _ in range(batch_size)]
else:
target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
if vary_lengths else target_length) for _ in range(batch_size)]
if zero_mode == ZERO_SOME:
idxes = torch.randint(0, batch_size, (10,))
for i in idxes:
target_lengths[i] = 0
def ctc_after_softmax(x):
x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
.view(input_length, batch_size, num_labels))
log_probs = torch.log_softmax(x_full, 2)
return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
batch_size = 16
input_length = 30
num_labels = 101
target_length = 15
targets = torch.randint(1, num_labels, (batch_size * target_length,),
device='cuda', dtype=torch.long)
log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
log_probs.requires_grad_()
input_lengths = batch_size * [input_length]
target_lengths = batch_size * [target_length]
grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
with torch.backends.cudnn.flags(enabled=False):
loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
input_lengths, target_lengths, reduction='none')
self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), -2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
b.backward(torch.ones(3, device=device))
expected = torch.tensor([0., 0., 1.], device=device)
self.assertEqual(a.grad, expected)
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.elu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.celu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither of
# (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
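# Why this holds (added note): d((x + 2) * m)/dx only needs m, and d(x + 2)/dx is the identity,
# so autograd does not have to keep (x + 2) or ((x + 2) * m) alive for backward; the new z simply
# replaces the previously allocated z of the same size.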
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require gradients for the
# weight parameters while still requiring them for the inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
# checks that undefined gradients don't hamper the backward
# see #11872
l = torch.nn.LSTMCell(2, 3).to(device).double()
s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
for i in range(2):
out = l(s)[i]
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
def flatten_out(mod, inp):
out = mod(inp)
return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
gradcheckfunc = partial(flatten_out, mod)
with torch.backends.cudnn.flags(enabled=False):
gradcheck(gradcheckfunc, inp, check_batched_grad=False)
gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
if inp.is_cuda and not TEST_WITH_ROCM:
# Assert that we have good error message around unsupported CuDNN double backward
# NB: we trigger double backward using .backward() instead of autograd.grad due to
# https://github.com/pytorch/pytorch/issues/37874
with torch.backends.cudnn.flags(enabled=True):
result = gradcheckfunc(inp)
result[0].sum().backward(create_graph=True)
grad0 = next(mod.parameters()).grad
with self.assertRaisesRegex(RuntimeError,
"please disable the CuDNN backend temporarily"):
grad0.sum().backward()
# Here we avoid the backward(create_graph=True) memory leak
# described in https://github.com/pytorch/pytorch/issues/7343
for param in mod.parameters():
param.grad = None
inp.grad = None
@skipMeta # LSTM cell reuses output which was resized
def test_LSTM_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
@skipMeta # GRU cell reuses output which was resized
def test_GRU_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
def test_copysign_subgradient(self, device):
# Input is 0.0
x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Input is -0.0
x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is 0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is -0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
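# Rationale (added note): copysign(x, y) equals sign(y) * |x|, so d/dx is sign(y) * sign(x)
# away from zero; at x == 0 these tests expect the subgradient 0, and the gradient w.r.t. y is
# always 0 because the output is piecewise constant in y.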
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml
# there was a bug that bfloat16 was not recognized as floating.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point]
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
# Reentrant starts on the GPU thread, finishes on the GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on the CPU thread, finishes on the GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on the GPU thread, finishes on the CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op so that it runs
# as soon as we reach it, before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on the gpu so that the work queue for the gpu thread
# won't empty too quickly. These ops also have lower priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform the checkpoint on cpu tensors, so that the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread on behalf of the gpu thread.
# The cpu thread will then notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
# TODO: see if this can be OpInfo'd or moved to test_reductions.py
def test_logcumsumexp_large_value(self, device):
a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True)
with torch.no_grad():
# Large Number
a[0] = 10000
gradcheck(lambda x: x.logcumsumexp(0), a)
gradgradcheck(lambda x: x.logcumsumexp(0), a)
gradcheck(lambda x: x.logcumsumexp(1), a)
gradgradcheck(lambda x: x.logcumsumexp(1), a)
gradcheck(lambda x: x.logcumsumexp(2), a)
gradgradcheck(lambda x: x.logcumsumexp(2), a)
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# TODO: OpInfo this or move to atleast's test suite
def _test_atleast(self, device, torch_fn):
# 0-dim
s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), s)
gradgradcheck(lambda x: torch_fn(x), s)
# 1-dim
a = torch.rand(4, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), a)
gradgradcheck(lambda x: torch_fn(x), a)
# 2,3,4-dim
b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
input_tuple = (s, a, b, c, d)
gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
self._test_atleast(device, torch.atleast_1d)
self._test_atleast(device, torch.atleast_2d)
self._test_atleast(device, torch.atleast_3d)
# TODO: opinfo this or move to test_binary_ufuncs.py
def test_xlogy(self, device):
def _tensor_tensor_helper(x, y):
gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
with torch.no_grad():
x = x.clone()
x[torch.rand_like(x) > 0.5] = 0
gradcheck(lambda y: torch.xlogy(x, y), (y))
gradgradcheck(lambda y: torch.xlogy(x, y), (y))
shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))
# For broadcastable shapes and scalars.
for x_shape, y_shape in permutations(shapes, 2):
x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
gradcheck(lambda y: torch.xlogy(0, y), (y))
gradgradcheck(lambda y: torch.xlogy(0, y), (y))
gradcheck(lambda y: torch.xlogy(2, y), (y))
gradgradcheck(lambda y: torch.xlogy(2, y), (y))
gradcheck(lambda y: torch.xlogy(y, 2), (y))
gradgradcheck(lambda y: torch.xlogy(y, 2), (y))
# Different shape
x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
# Same shape
x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
# Just make sure the op doesn't raise an error
# and resulting tensor has requires_grad=False.
x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
out = torch.isin(x, torch.tensor([2, 3], device=device))
self.assertFalse(out.requires_grad)
x = torch.randn(3, 3, requires_grad=True)
out = torch.signbit(x)
self.assertFalse(out.requires_grad)
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
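# Background (added note, mirroring the behavior asserted in the tests below): code run under
# torch.inference_mode() creates "inference tensors" that skip version-counter and view tracking,
# roughly:
#   with torch.inference_mode():
#       t = torch.ones(2)   # torch.is_inference(t) is True
#   t.add_(1)               # raises: inplace update to inference tensor outside InferenceMode
# The exact error strings are asserted in the tests below.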
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
@torch.inference_mode()
def func(x):
self.assertTrue(torch.is_inference_mode_enabled())
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(torch.is_inference(d))
self.assertFalse(d.requires_grad)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing inplace operation, tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a view operation produces an inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
# tensor argument, so there is no mixed inference/normal
# tensor input for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
# this is fine since it's equivalent to s.view(c.sizes()), which
# isn't a mixed-input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
threads = []
for _ in range(num_threads):
            p = threading.Thread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
def test_simple_backward(self):
# simple multithreaded backward that create threads in the beginning of training
# and everything else is training separately, i.e. inputs, operations, etc.
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (i.e. This is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, when we do backward
# concurrently, different backwards will all accumulate to
# the same .grad for each input, and the gradients should
# be equal to num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use functional grad() api, gradients will not
# be accumulate to the same place and should be the same
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_python_thread_in_middle(self):
# User might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is being shared across
# different threads and we need to ensure user specify retain_graph=True, otherwise
# error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
# at least one thread will be success in this case, all other threads should raise
# with the error that throw to user to recommend them specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# result should equal to num_thread * gradients
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
if __name__ == '__main__':
run_tests()
|
conversion_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
import threading
import types
import weakref
import gast
import six
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.impl.testing import pybind_for_testing
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class ConversionTest(test.TestCase):
def _simple_program_ctx(self):
return converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
def test_is_whitelisted(self):
def test_fn():
return constant_op.constant(1)
self.assertFalse(conversion.is_whitelisted(test_fn))
self.assertTrue(conversion.is_whitelisted(utils))
self.assertTrue(conversion.is_whitelisted(constant_op.constant))
def test_is_whitelisted_tensorflow_like(self):
tf_like = imp.new_module('tensorflow_foo')
def test_fn():
pass
tf_like.test_fn = test_fn
test_fn.__module__ = tf_like
self.assertFalse(conversion.is_whitelisted(tf_like.test_fn))
def test_is_whitelisted_callable_whitelisted_call(self):
whitelisted_mod = imp.new_module('test_whitelisted_call')
sys.modules['test_whitelisted_call'] = whitelisted_mod
config.CONVERSION_RULES = ((config.DoNotConvert('test_whitelisted_call'),) +
config.CONVERSION_RULES)
class TestClass(object):
def __call__(self):
pass
def whitelisted_method(self):
pass
TestClass.__module__ = 'test_whitelisted_call'
if six.PY2:
TestClass.__call__.__func__.__module__ = 'test_whitelisted_call'
else:
TestClass.__call__.__module__ = 'test_whitelisted_call'
class Subclass(TestClass):
def converted_method(self):
pass
tc = Subclass()
self.assertTrue(conversion.is_whitelisted(TestClass.__call__))
self.assertTrue(conversion.is_whitelisted(tc))
self.assertTrue(conversion.is_whitelisted(tc.__call__))
self.assertTrue(conversion.is_whitelisted(tc.whitelisted_method))
self.assertFalse(conversion.is_whitelisted(Subclass))
self.assertFalse(conversion.is_whitelisted(tc.converted_method))
def test_is_whitelisted_tfmethodwrapper(self):
class TestClass(object):
def member_function(self):
pass
TestClass.__module__ = 'test_whitelisted_call'
test_obj = TestClass()
def test_fn(self):
del self
bound_method = types.MethodType(
test_fn,
function.TfMethodTarget(
weakref.ref(test_obj), test_obj.member_function))
self.assertTrue(conversion.is_whitelisted(bound_method))
def test_is_whitelisted_pybind(self):
test_object = pybind_for_testing.TestClassDef()
with test.mock.patch.object(config, 'CONVERSION_RULES', ()):
# TODO(mdan): This should return True for functions and methods.
# Note: currently, native bindings are whitelisted by a separate check.
self.assertFalse(conversion.is_whitelisted(test_object.method))
def test_convert_entity_to_ast_callable(self):
b = 2
def f(a):
return a + b
program_ctx = self._simple_program_ctx()
nodes, name, info = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertIs(info.namespace['b'], b)
def test_convert_entity_to_ast_function_with_defaults(self):
b = 2
c = 1
def f(a, d=c + 1):
return a + b + d
program_ctx = self._simple_program_ctx()
nodes, name, _ = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertEqual(
parser.unparse(fn_node.args.defaults[0],
include_encoding_marker=False).strip(), 'None')
def test_convert_entity_to_ast_call_tree(self):
def g(a):
return a
def f(a):
return g(a)
program_ctx = self._simple_program_ctx()
nodes, _, _ = conversion.convert_entity_to_ast(f, program_ctx)
f_node, = nodes
self.assertEqual('tf__f', f_node.name)
def test_convert_entity_to_ast_lambda(self):
b = 2
f = lambda x: b * x if x > 0 else -x
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['b'], b)
def test_convert_entity_to_ast_multiple_lambdas(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda y: b * y)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['a'], a)
def test_convert_entity_to_ast_multiple_lambdas_ambiguous_definitions(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda x: b * x)
program_ctx = self._simple_program_ctx()
with self.assertRaises(ValueError):
conversion.convert_entity_to_ast(f, program_ctx)
def test_convert_entity_to_ast_lambda_code_with_garbage(self):
# pylint:disable=g-long-lambda
f = ( # intentional wrap
lambda x: (
x # intentional wrap
+ 1),)[0]
# pylint:enable=g-long-lambda
program_ctx = self._simple_program_ctx()
(fn_node,), name, _ = conversion.convert_entity_to_ast(f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
def test_convert_entity_to_ast_nested_functions(self):
b = 2
def f(x):
def g(x):
return b * x
return g(x)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual(fn_node.name, 'tf__f')
self.assertEqual('tf__f', name)
self.assertIs(entity_info.namespace['b'], b)
def test_convert_concurrency(self):
def test_fn():
pass
generated_file_names = []
def conversion_thread():
new_f = conversion.convert(test_fn, self._simple_program_ctx())
generated_file_names.append(new_f.__code__.co_filename)
threads = tuple(
threading.Thread(target=conversion_thread) for _ in range(10))
for t in threads:
t.start()
for t in threads:
t.join()
# Races would potentially create multiple files (non-deterministically,
# but with high likelihood).
self.assertEqual(len(set(generated_file_names)), 1)
def test_convert_reentrance(self):
def test_fn():
pass
# There are no known ways to cause convert to re-enter. So we instrument
# an internal function to do that instead.
old_node_to_graph = conversion.node_to_graph
self.num_conversions = 0
def node_to_graph_wrapper(node, context):
self.num_conversions += 1
if self.num_conversions < 2:
conversion.convert(test_fn, self._simple_program_ctx())
return old_node_to_graph(node, context)
try:
conversion.node_to_graph = node_to_graph_wrapper
new_f = conversion.convert(test_fn, self._simple_program_ctx())
self.assertIsNotNone(new_f)
finally:
conversion.node_to_graph = old_node_to_graph
if __name__ == '__main__':
test.main()
|
gate_tagging.py
|
import threading
import requests
import time
import json
import pandas as pd
import unidecode
spanish_path = "./datasets/es.json"
english_path = "./datasets/all_data_en.json"
file = open(english_path, "r")
data = []
for index, line in enumerate(file):
data.append(json.loads(line))
if index == 1000:
break
text_raw_df = pd.json_normalize(data)
print(text_raw_df.shape)
text_raw_df["text"].head(10)
def worker(i, dfi, chunk_size):
"""thread worker function"""
tags = []
start_time = time.time()
print("Running worker:", i)
for num in range(dfi.shape[0]):
text = dfi.iloc[num, :]["text"]
text = unidecode.unidecode(text)
headers = {"Content-Type": "text/plain"}
r = requests.post("http://localhost:8080/", headers=headers, data=text)
try:
missinfo_class = r.json()["entities"]["MisinfoClass"][0]["class"]
tags.append(missinfo_class)
        except Exception:
            # Keep tags aligned with the chunk's rows even when the response can't be parsed.
            tags.append(None)
print(" Thread:", i)
print("text:", text)
print(r.json())
index = list(range(i * chunk_size, (i + 1) * chunk_size))
final = pd.concat(
[pd.DataFrame(dfi), pd.Series(tags, name="gate_tags", index=index)], axis=1
)
final.to_json(f"./gateTagging/tagThread_{i}", orient="records", lines=True)
end_time = time.time()
print(f"Time worker_{i}:", end_time - start_time)
threads = []
for i in range(1):
chunk_size = 100
dfi = text_raw_df.iloc[i * chunk_size : (i + 1) * chunk_size, :]
t = threading.Thread(target=worker, args=(i, dfi, chunk_size))
threads.append(t)
t.start()
|
plugin.py
|
import datetime
import json
import re
import sys
import threading
import time
import traceback
import urllib.request, urllib.parse, urllib.error
hasWebsockets=False
try:
import websocket
hasWebsockets=True
except:
pass
from avnav_api import AVNApi
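# Plugin configuration read via the AvNav API: signalk host/port, query periods
# (converted from milliseconds to seconds), chart proxy mode and websocket usage.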
class Config(object):
def __init__(self,api):
self.port = 3000
self.period = 1000
self.chartQueryPeriod = 10
port = api.getConfigValue('port', '3000')
self.port = int(port)
period = api.getConfigValue('period', '1000')
self.period = int(period) / 1000
self.expiryPeriod = api.getExpiryPeriod()
if (self.period > self.expiryPeriod):
self.period = self.expiryPeriod
self.skHost = api.getConfigValue('host', 'localhost')
self.chartQueryPeriod = int(api.getConfigValue('chartQueryPeriod', '10000')) / 1000
    self.proxyMode = api.getConfigValue('chartProxyMode', 'sameHost')
self.useWebsockets = api.getConfigValue('useWebsockets', 'true').lower() == 'true'
class Plugin(object):
PATH="gps.signalk"
CHARTNAME_PREFIX="sk-"
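  # TMS (Tile Map Service) XML document served for .../avnav.xml chart requests;
  # the %(...)s placeholders are filled from a chart's 'internal' description.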
AVNAV_XML="""<?xml version="1.0" encoding="UTF-8" ?>
<TileMapService version="1.0.0" >
<Title>%(title)s</Title>
<TileMaps>
<TileMap
title="%(title)s"
href="%(url)s"
minzoom="%(minzoom)s"
maxzoom="%(maxzoom)s"
projection="EPSG:4326">
<BoundingBox minlon="%(minlon)f" minlat="%(minlat)f" maxlon="%(maxlon)f" maxlat="%(maxlat)f" title="layer"/>
<TileFormat width="256" height="256" mime-type="x-%(format)s" extension="%(format)s" />
</TileMap>
</TileMaps>
</TileMapService>
"""
CONFIG=[
{
'name':'port',
'description':'set to signalk port',
'default':'3000',
'type': 'NUMBER'
},
{
'name': 'host',
'description': 'set to signalk host',
'default': 'localhost'
},
{
'name':'period',
'description':'query period in ms',
'default':'1000',
'type':'NUMBER'
},
{
'name': 'chartQueryPeriod',
'description': 'charts query period in ms, 0 to disable',
'default': '10000',
'type':'NUMBER'
},
{
'name': 'chartProxyMode',
'description': 'proxy tile requests: never,always,sameHost',
'default': 'sameHost',
'type': 'SELECT',
'rangeOrList':['never','always','sameHost']
},
{
'name': 'useWebsockets',
'description': 'use websockets if the package is available - true or false',
'default': True,
'type': 'BOOLEAN'
}
]
@classmethod
def pluginInfo(cls):
"""
the description for the module
@return: a dict with the content described below
parts:
* description (mandatory)
* data: list of keys to be stored (optional)
* path - the key - see AVNApi.addData, all pathes starting with "gps." will be sent to the GUI
* description
"""
return {
'description': 'a plugin that fetches vessels data from signalk',
'version': '1.0',
'config': cls.CONFIG,
'data': [
{
'path': cls.PATH+".*",
'description': 'vessels data from signalk',
}
]
}
def __init__(self,api):
"""
initialize a plugins
do any checks here and throw an exception on error
do not yet start any threads!
@param api: the api to communicate with avnav
@type api: AVNApi
"""
self.api = api # type: AVNApi
self.api.registerRequestHandler(self.requestHandler)
self.skCharts=[]
self.connected=False
self.webSocket=None
self.useWebsockets=True
self.api.registerEditableParameters(self.CONFIG,self.changeParam)
self.api.registerRestart(self.stop)
self.startSequence=0
self.config=None # Config
self.userAppId=None
def stop(self):
self.startSequence+=1
def changeParam(self,param):
self.api.saveConfigValues(param)
self.startSequence+=1
def run(self):
self.api.registerLayout("example", "example.json")
self.api.registerChartProvider(self.listCharts)
while not self.api.shouldStopMainThread():
self.config=Config(self.api)
self._runInternal()
try:
self.webSocket.close()
except:
pass
def _runInternal(self):
sequence=self.startSequence
"""
the run method
this will be called after successfully instantiating an instance
this method will be called in a separate Thread
The example simply counts the number of NMEA records that are flowing through avnav
and writes them to the store every 10 records
@return:
"""
self.api.log("started with host %s port %d, period %d"
%(self.config.skHost,self.config.port,self.config.period))
baseUrl="http://%s:%d/signalk"%(self.config.skHost,self.config.port)
if self.userAppId is not None:
self.api.unregisterUserApp(self.userAppId)
self.userAppId=None
if self.config.skHost == "localhost":
self.userAppId=self.api.registerUserApp("http://$HOST:%s"%self.config.port,"signalk.svg")
else:
self.userAppId=self.api.registerUserApp("http://%s:%s" %
(self.config.skHost,self.config.port), "signalk.svg")
errorReported=False
self.api.setStatus("STARTED", "connecting at %s" % baseUrl)
while sequence == self.startSequence:
apiUrl=None
websocketUrl=None
if self.webSocket is not None:
try:
self.webSocket.close()
except:
pass
self.webSocket=None
while apiUrl is None :
if sequence != self.startSequence:
return
self.connected=False
responseData=None
try:
response=urllib.request.urlopen(baseUrl)
if response is None:
raise Exception("no response on %s"%baseUrl)
responseData=json.loads(response.read())
if responseData is None:
raise Exception("no response on %s"%baseUrl)
#{"endpoints":{"v1":{"version":"1.20.0","signalk-http":"http://localhost:3000/signalk/v1/api/","signalk-ws":"ws://localhost:3000/signalk/v1/stream","signalk-tcp":"tcp://localhost:8375"}},"server":{"id":"signalk-server-node","version":"1.20.0"}}
endpoints = responseData.get('endpoints')
if endpoints is None:
raise Exception("no endpoints in response to %s"%baseUrl)
for k in list(endpoints.keys()):
ep=endpoints[k]
if apiUrl is None:
apiUrl=ep.get('signalk-http')
if apiUrl is not None:
errorReported=False
if websocketUrl is None:
websocketUrl=ep.get("signalk-ws")
except:
if not errorReported:
self.api.setStatus("ERROR", "unable to connect at %s" % baseUrl)
self.api.log("unable to connect at url %s: %s" % (baseUrl, sys.exc_info()[0]))
errorReported=True
time.sleep(1)
continue
if apiUrl is None:
time.sleep(1)
else:
self.api.log("found api url %s",apiUrl)
selfUrl=apiUrl+"vessels/self"
self.connected = True
        useWebsockets = self.config.useWebsockets and hasWebsockets and websocketUrl is not None
if useWebsockets:
if self.config.period < self.config.expiryPeriod:
self.config.period=self.config.expiryPeriod
self.api.log("using websockets at %s, querying with period %d", websocketUrl,self.config.period)
if self.webSocket is not None:
try:
self.webSocket.close()
except:
self.api.debug("error when closing websocket: %s",traceback.format_exc())
self.webSocket=websocket.WebSocketApp(websocketUrl,
on_error=self.webSocketError,
on_message=self.webSocketMessage,
on_close=self.webSocketClose,
on_open=self.webSocketOpen)
self.api.log("websocket created at %s",self.webSocket.url)
webSocketThread=threading.Thread(name="signalk-websocket",target=self.webSocketRun)
webSocketThread.setDaemon(True)
webSocketThread.start()
try:
lastChartQuery=0
lastQuery=0
first=True # when we newly connect, just query everything once
errorReported=False
while self.connected and self.startSequence == sequence:
now = time.time()
#handle time shift backward
if lastChartQuery > now:
lastChartQuery=0
if lastQuery > now:
lastQuery=0
if (now - lastQuery) > self.config.period or first:
first=False
lastQuery=now
response=None
try:
response=urllib.request.urlopen(selfUrl)
if response is None:
self.skCharts = []
if not errorReported:
self.api.error("unable to fetch from %s: None", selfUrl)
errorReported=True
except Exception as e:
self.skCharts=[]
if not errorReported:
self.api.error("unable to fetch from %s:%s",selfUrl,str(e))
errorReported=True
if response is not None:
errorReported=False
if not first:
self.api.setStatus("NMEA", "connected at %s" % apiUrl)
data=json.loads(response.read())
self.api.debug("read: %s",json.dumps(data))
self.storeData(data,self.PATH)
name=data.get('name')
if name is not None:
self.api.addData(self.PATH+".name",name)
else:
pass
if self.config.chartQueryPeriod > 0 and lastChartQuery < (now - self.config.chartQueryPeriod):
lastChartQuery=now
try:
self.queryCharts(apiUrl,self.config.port)
except Exception as e:
self.skCharts=[]
self.api.debug("exception while reading chartlist %s",traceback.format_exc())
sleepTime=1 if self.config.period > 1 else self.config.period
time.sleep(sleepTime)
except:
self.api.log("error when fetching from signalk %s: %s",apiUrl,traceback.format_exc())
self.api.setStatus("ERROR","error when fetching from signalk %s"%(apiUrl))
self.connected=False
if sequence != self.startSequence:
return
time.sleep(5)
def webSocketRun(self):
self.api.log("websocket receiver started")
self.webSocket.run_forever()
self.api.log("websocket receiver finished")
def webSocketOpen(self,*args):
self.api.log("websocket connected")
#there is a change in the websocket client somewhere between
#0.44 and 0.55 - the newer versions omit the ws parameter
def getParam(self,*args):
if len(args) > 1:
return args[1]
if len(args) > 0:
return args[0]
def webSocketError(self,*args):
error=self.getParam(*args)
self.api.error("error on websocket connection: %s", error)
try:
self.api.setStatus("ERROR", "error on websocket connection %s: %s" % (self.webSocket.url, error))
self.webSocket.close()
except:
pass
self.webSocket=None
self.connected=False
def webSocketClose(self,*args):
self.api.log("websocket connection closed")
self.connected=False
try:
self.api.setStatus("ERROR", "connection closed at %s" % self.webSocket.url)
except:
pass
self.webSocket=None
def webSocketMessage(self,*args):
message=self.getParam(*args)
self.api.setStatus("NMEA", "connected at %s" % self.webSocket.url)
self.api.debug("received: %s",message)
try:
data=json.loads(message)
updates=data.get('updates')
if updates is None:
return
for update in updates:
values=update.get('values')
if values is None:
continue
for item in values:
value=item.get('value')
path=item.get('path')
if value is not None and path is not None:
if path.startswith("notifications"):
#TODO: handle notifications
pass
else:
self.api.addData(self.PATH+"."+path,value, 'signalk')
except:
self.api.error("error decoding %s:%s",message,traceback.format_exc())
try:
self.webSocket.close()
except:
pass
self.webSocket=None
self.connected=False
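  # Query signalk's resources/charts API and translate each chart entry into the
  # description AvNav expects; results are cached in self.skCharts for listCharts().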
def queryCharts(self,apiUrl,port):
charturl = apiUrl + "resources/charts"
chartlistResponse = urllib.request.urlopen(charturl)
if chartlistResponse is None:
self.skCharts = []
return
chartlist = json.loads(chartlistResponse.read())
newList = []
pluginUrl= self.api.getBaseUrl()
baseUrl = pluginUrl + "/api/charts/"
for chart in list(chartlist.values()):
name = chart.get('identifier')
if name is None:
continue
name = self.CHARTNAME_PREFIX + name
url = baseUrl + urllib.parse.quote(name)
bounds=chart.get('bounds')
#bounds is upperLeftLon,upperLeftLat,lowerRightLon,lowerRightLat
# minlon, maxlat, maxlon, minlat
if bounds is None:
bounds=[-180,85,180,-85]
if bounds[1] < bounds[3]:
#it seems that the plugin does not really provide the BB correctly...
tmp=bounds[3]
bounds[3]=bounds[1]
bounds[1]=tmp
chartInfo = {
'name': name,
'url': url,
'charturl': url,
'sequence': self.startSequence,
'canDelete': False,
'icon': pluginUrl+"/signalk.svg",
'upzoom': True,
'internal': {
'url': "http://%s:%d" % (self.config.skHost, port) + chart.get('tilemapUrl'),
'minlon': bounds[0],
'maxlat': bounds[1],
'maxlon': bounds[2],
'minlat': bounds[3],
'format': chart.get('format') or 'png',
'bounds': chart.get('bounds'),
'minzoom': chart.get('minzoom'),
'maxzoom': chart.get('maxzoom')
}
}
newList.append(chartInfo)
self.skCharts = newList
def storeData(self,node,prefix):
if 'value' in node:
self.api.addData(prefix, node.get('value'), 'signalk')
return
for key, item in list(node.items()):
if isinstance(item,dict):
self.storeData(item,prefix+"."+key)
def listCharts(self,hostip):
self.api.debug("listCharts %s"%hostip)
if not self.connected:
self.api.debug("not yet connected")
return []
try:
rt=[]
items=self.skCharts+[]
for item in items:
cp=item.copy()
del cp['internal']
rt.append(cp)
return rt
except:
self.api.debug("unable to list charts: %s"%traceback.format_exc())
return []
def requestHandler(self,url,handler,args):
'''
handle api requests
@param url:
@param handler:
@param args:
@return:
'''
if url.startswith("charts/"):
chart=url[len("charts/"):]
parr=chart.split("/")
if len(parr) < 2:
raise Exception("invalid chart url %s"%url)
chartName = parr[0]
chart=None
for chartinfo in self.skCharts:
if chartinfo.get('name')==chartName:
chart=chartinfo
break
if chart is None:
raise Exception("chart %s not found"%chartName)
if parr[1] == "avnav.xml":
requestHost = handler.headers.get('host')
requestHostAddr = requestHost.split(':')[0]
url='tiles'
doProxy=False
if self.config.proxyMode=='always' or ( self.config.proxyMode=='sameHost' and self.config.skHost != 'localhost'):
doProxy=True
if not doProxy:
#no proxying, direct access to sk for charts
url=chart['internal']['url'].replace('localhost',requestHostAddr)
param=chart['internal'].copy()
param.update({
'title':chart['name'],
'url':url,
})
data=self.AVNAV_XML%param
handler.send_response(200)
handler.send_header("Content-type", "text/xml")
handler.send_header("Content-Length", len(data))
handler.send_header("Last-Modified", handler.date_time_string())
handler.end_headers()
handler.wfile.write(data.encode('utf-8'))
return True
if parr[1] == "sequence":
return {'status':'OK','sequence':0}
if len(parr) < 5:
raise Exception("invalid request to chart %s: %s" % (chartName, url))
replaceV={'z':parr[2],
'x':parr[3],
                'y':re.sub(r"\..*","",parr[4])}
skurl=chart['internal']['url']
for k in list(replaceV.keys()):
skurl=skurl.replace("{"+k+"}",replaceV[k])
try:
tile = urllib.request.urlopen(skurl)
if tile is None:
return None
tileData = tile.read()
except:
self.api.debug("unable to read tile from sk %s:%s"%(url,traceback.format_exc()))
return
handler.send_response(200)
handler.send_header("Content-type", "image/%s"%chart['internal']['format'])
handler.send_header("Content-Length", len(tileData))
handler.send_header("Last-Modified", handler.date_time_string())
handler.end_headers()
handler.wfile.write(tileData)
return True
|
k-scale.py
|
# Lapse-Pi timelapse controller for Raspberry Pi
# This must run as root (sudo python lapse.py) due to framebuffer, etc.
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
#
# Prerequisite tutorials: aside from the basic Raspbian setup and PiTFT setup
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
#
# lapse.py by David Hunt (dave@davidhunt.ie)
# based on cam.py by Phil Burgess / Paint Your Dragon for Adafruit Industries.
# BSD license, all text above must be included in any redistribution.
import wiringpi2
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import os
import pygame
import threading
import signal
import sys
from pygame.locals import *
from subprocess import call
from time import sleep
from datetime import datetime, timedelta
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
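# Icons are loaded at startup by scanning iconPath for '*.png' files (e.g. 'start.png'
# becomes Icon('start')) and are matched to Buttons by name further below.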
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.iteritems():
if key == 'color': self.color = value
elif key == 'bg' : self.bg = value
elif key == 'fg' : self.fg = value
elif key == 'cb' : self.callback = value
elif key == 'value': self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None: self.callback()
else: self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
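# Example (taken from the buttons[] list below): a tappable 120x60 region at (5,180)
# showing the 'start' icon that calls startCallback(1) when touched:
#   Button((  5,180,120, 60), bg='start', cb=startCallback, value=1)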
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def motorCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global motorRunning
global motorDirection
global motorpin
global motorpinA
global motorpinB
if n == 1:
motorDirection = 1
motorpin = motorpinA
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
elif n == 2:
motorDirection = 0
motorpin = motorpinB
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
def numericCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global numberstring
if n < 10:
numberstring = numberstring + str(n)
elif n == 10:
numberstring = numberstring[:-1]
elif n == 11:
screenMode = 1
elif n == 12:
screenMode = returnScreen
numeric = int(numberstring)
v[dict_idx] = numeric
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
screenMode += n
if screenMode < 1: screenMode = len(buttons) - 1
elif screenMode >= len(buttons): screenMode = 1
def valuesCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global returnScreen
global numberstring
global numeric
global v
global dict_idx
if n == -1:
screenMode = 0
saveSettings()
if n == 1:
dict_idx='Pulse'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 2:
dict_idx='Interval'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 3:
dict_idx='Images'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
def viewCallback(n): # Viewfinder buttons
global screenMode, screenModePrior
  if n == 0: # Gear icon
screenMode = 1
def doneCallback(): # Exit settings
global screenMode
if screenMode > 0:
saveSettings()
screenMode = 0 # Switch back to main window
def startCallback(n): # start/Stop the timelapse thread
global t, busy, threadExited
global currentframe
if n == 1:
if busy == False:
if (threadExited == True):
        # Re-instantiate the object for the next start
t = threading.Thread(target=timeLapse)
threadExited = False
t.start()
if n == 0:
if busy == True:
busy = False
t.join()
currentframe = 0
      # Re-instantiate the object for the next time around.
t = threading.Thread(target=timeLapse)
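# timeLapse() runs in its own thread: for each frame it pulses the motor, waits for
# vibration to settle, blanks the backlight, fires the shutter, restores the backlight,
# then sleeps out the remaining interval. Clearing `busy` stops it early.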
def timeLapse():
global v
global settling_time
global shutter_length
global motorpin
global shutterpin
global backlightpin
global busy, threadExited
global currentframe
busy = True
for i in range( 1 , v['Images'] + 1 ):
if busy == False:
break
currentframe = i
gpio.digitalWrite(motorpin,gpio.HIGH)
pulse = float(v['Pulse'])/1000.0
sleep(pulse)
gpio.digitalWrite(motorpin,gpio.LOW)
sleep(settling_time)
# disable the backlight, critical for night timelapses, also saves power
os.system("echo '0' > /sys/class/gpio/gpio252/value")
gpio.digitalWrite(shutterpin,gpio.HIGH)
sleep(shutter_length)
gpio.digitalWrite(shutterpin,gpio.LOW)
# enable the backlight
os.system("echo '1' > /sys/class/gpio/gpio252/value")
interval = float(v['Interval'])/1000.0
if (interval > shutter_length):
sleep(interval - shutter_length)
currentframe = 0
busy = False
threadExited = True
def signal_handler(signal, frame):
print 'got SIGTERM'
pygame.quit()
sys.exit()
# Global stuff -------------------------------------------------------------
t = threading.Thread(target=timeLapse)
busy = False
threadExited = False
screenMode = 0 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
numeric = 0 # number from numeric keypad
numberstring = "0"
motorRunning = 0
motorDirection = 0
returnScreen = 0
#shutterpin = 17 # Chris Debug - original line
shutterpin = 0
#motorpinA = 18 # Chris Debug - original line
motorpinA = 2
#motorpinB = 27 # Chris Debug - original line
motorpinB = 3
motorpin = motorpinA
#backlightpin = 252 # Chris Debug - original line
backlightpin = 18 # Chris debug - piTFT needs pins 10-14 & 24-25, GPIO pin 18 is for backlight pwm
currentframe = 0
framecount = 100
settling_time = 0.2
shutter_length = 0.2
interval_delay = 0.2
dict_idx = "Interval"
v = { "Pulse": 100,
"Interval": 3000,
"Images": 150}
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
# Screen mode 0 is main view screen of current status
[Button(( 5,180,120, 60), bg='start', cb=startCallback, value=1),
Button((130,180, 60, 60), bg='cog', cb=viewCallback, value=0),
Button((195,180,120, 60), bg='stop', cb=startCallback, value=0)],
# Screen 1 for changing values and setting motor direction
[Button((260, 0, 60, 60), bg='cog', cb=valuesCallback, value=1),
Button((260, 60, 60, 60), bg='cog', cb=valuesCallback, value=2),
Button((260,120, 60, 60), bg='cog', cb=valuesCallback, value=3),
Button(( 0,180,160, 60), bg='ok', cb=valuesCallback, value=-1),
Button((160,180, 70, 60), bg='left', cb=motorCallback, value=1),
Button((230,180, 70, 60), bg='right', cb=motorCallback, value=2)],
# Screen 2 for numeric input
[Button(( 0, 0,320, 60), bg='box'),
Button((180,120, 60, 60), bg='0', cb=numericCallback, value=0),
Button(( 0,180, 60, 60), bg='1', cb=numericCallback, value=1),
Button((120,180, 60, 60), bg='3', cb=numericCallback, value=3),
Button(( 60,180, 60, 60), bg='2', cb=numericCallback, value=2),
Button(( 0,120, 60, 60), bg='4', cb=numericCallback, value=4),
Button(( 60,120, 60, 60), bg='5', cb=numericCallback, value=5),
Button((120,120, 60, 60), bg='6', cb=numericCallback, value=6),
Button(( 0, 60, 60, 60), bg='7', cb=numericCallback, value=7),
Button(( 60, 60, 60, 60), bg='8', cb=numericCallback, value=8),
Button((120, 60, 60, 60), bg='9', cb=numericCallback, value=9),
Button((240,120, 80, 60), bg='del', cb=numericCallback, value=10),
Button((180,180,140, 60), bg='ok', cb=numericCallback, value=12),
Button((180, 60,140, 60), bg='cancel',cb=numericCallback, value=11)]
]
# Assorted utility functions -----------------------------------------------
def saveSettings():
global v
try:
outfile = open('lapse.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
pickle.dump(v, outfile)
outfile.close()
except:
pass
def loadSettings():
global v
try:
infile = open('lapse.pkl', 'rb')
v = pickle.load(infile)
infile.close()
except:
pass
# Initialization -----------------------------------------------------------
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV' , '/dev/fb1')
os.putenv('SDL_MOUSEDRV' , 'TSLIB')
os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# Init pygame and screen
print "Initting..."
pygame.init()
print "Setting Mouse invisible..."
pygame.mouse.set_visible(False)
print "Setting fullscreen..."
modes = pygame.display.list_modes(16)
screen = pygame.display.set_mode(modes[0], FULLSCREEN, 16)
print "Loading Icons..."
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
print"Assigning Buttons"
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
# Set up GPIO pins
# Chris Debug - original GPIO configuration broke screen because motorpinA was the backlight GPIO pin
print "Init GPIO pins..."
gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_GPIO)
gpio.pinMode(backlightpin,gpio.PWM_OUTPUT) # Chris Debug - moved backlight to correct pin for piTFT, set to PWM mode
gpio.pinMode(shutterpin,gpio.OUTPUT)
gpio.pinMode(motorpinA,gpio.OUTPUT)
gpio.pinMode(motorpinB,gpio.OUTPUT)
gpio.pwmWrite(backlightpin,1024) # Chris debug - pwm range is 0-1024
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
''' # Chris Debug
# I couldnt seem to get at pin 252 for the backlight using the usual method above,
# but this seems to work
os.system("echo 252 > /sys/class/gpio/export")
os.system("echo 'out' > /sys/class/gpio/gpio252/direction")
os.system("echo '1' > /sys/class/gpio/gpio252/value")
''' # Chris Debug
print"Load Settings"
loadSettings() # Must come last; fiddles with Button/Icon states
print "loading background.."
#img = pygame.image.load("icons/LapsePi.png") # Chris Debug - Original line
img = pygame.image.load("icons/london.png") # Chris Debug - picture of Chris & Suzy with correct dimensions
if img is None or img.get_height() < 320: # Letterbox, clear background # Chris Debug - changed 240 to 320
screen.fill(0)
if img:
screen.blit(img,
((480 - img.get_width() ) / 2, # Chris Debug - changed from 320 to 480
(320 - img.get_height()) / 2)) # Chris Debug - changed from 240 to 320
pygame.display.update()
sleep(2)
# Main loop ----------------------------------------------------------------
signal.signal(signal.SIGTERM, signal_handler)
print "mainloop.."
while(True):
# Process touchscreen input
while True:
for event in pygame.event.get():
      if(event.type == MOUSEBUTTONDOWN):
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
      elif(event.type == MOUSEBUTTONUP):
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
if screenMode >= 0 or screenMode != screenModePrior: break
if img is None or img.get_height() < 320: # Letterbox, clear background # Chris Debug - changed 240 to 320
screen.fill(0)
if img:
screen.blit(img,
((480 - img.get_width() ) / 2, # Chris Debug - changed from 320 to 480
(320 - img.get_height()) / 2)) # Chris Debug - changed from 240 to 320
# Overlay buttons on display and update
for i,b in enumerate(buttons[screenMode]):
b.draw(screen)
if screenMode == 2:
myfont = pygame.font.SysFont("Arial", 50)
label = myfont.render(numberstring, 1, (255,255,255))
screen.blit(label, (10, 2))
if screenMode == 1:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 70))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 70))
label = myfont.render(str(v['Images']) , 1, (255,255,255))
screen.blit(label, (130,130))
if screenMode == 0:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 50))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10, 90))
label = myfont.render("Remaining:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 50))
label = myfont.render(str(currentframe) + " of " + str(v['Images']) , 1, (255,255,255))
screen.blit(label, (160, 90))
intervalLength = float((v['Pulse'] + v['Interval'] + (settling_time*1000) + (shutter_length*1000)))
remaining = float((intervalLength * (v['Images'] - currentframe)) / 1000)
sec = timedelta(seconds=int(remaining))
d = datetime(1,1,1) + sec
remainingStr = "%dh%dm%ds" % (d.hour, d.minute, d.second)
label = myfont.render(remainingStr , 1, (255,255,255))
screen.blit(label, (160, 130))
pygame.display.update()
screenModePrior = screenMode
|
test_logutil.py
|
import errno
import logging
import os
import subprocess32
import threading
import unittest
from pykit import logutil
logger = logging.getLogger(__name__)
def subproc(script, cwd=None):
subproc = subprocess32.Popen(['sh'],
close_fds=True,
cwd=cwd,
stdin=subprocess32.PIPE,
stdout=subprocess32.PIPE,
stderr=subprocess32.PIPE)
out, err = subproc.communicate(script)
subproc.wait()
if subproc.returncode != 0:
print out
print err
return (subproc.returncode, out, err)
def read_file(fn):
with open(fn, 'r') as f:
return f.read()
def rm_file(fn):
try:
os.unlink(fn)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
class TestFileHandler(unittest.TestCase):
def test_concurrent_write_and_remove(self):
l = logutil.make_logger(base_dir='/tmp',
log_name='rolling',
log_fn='rolling.out',
level=logging.DEBUG,
fmt='message')
n = 10240
sess = {'running': True}
def _remove():
while sess['running']:
rm_file('/tmp/rolling.out')
th = threading.Thread(target=_remove)
th.daemon = True
th.start()
for ii in range(n):
l.debug('123')
sess['running'] = False
th.join()
class TestLogutil(unittest.TestCase):
def setUp(self):
rm_file('/tmp/t.out')
# root logger
logutil.make_logger(base_dir='/tmp',
log_fn='t.out',
level=logging.DEBUG,
fmt='message')
def test_get_root_log_fn(self):
# instant
code, out, err = subproc(
'python -c "from pykit import logutil; print logutil.get_root_log_fn()"')
self.assertEqual(0, code)
self.assertEqual('__instant_command__.out', out.strip())
code, out, err = subproc(
'echo "from pykit import logutil; print logutil.get_root_log_fn()" | python')
self.assertEqual(0, code)
self.assertEqual('__stdin__.out', out.strip())
# load by file
code, out, err = subproc(
'python foo.py', cwd=os.path.dirname(__file__))
self.assertEqual(0, code)
self.assertEqual('foo.out', out.strip())
def test_deprecate(self):
fmt = '{fn}::{ln} in {func}\n {statement}'
logutil.deprecate('foo', fmt=fmt, sep='\n')
cont = read_file('/tmp/t.out')
self.assertRegexpMatches(
cont,
'^Deprecated: foo')
self.assertRegexpMatches(
cont,
'test_logutil.py::\d+ in test_deprecate\n logutil.deprecate')
def test_stack_list(self):
stack = logutil.stack_list()
last = stack[-1]
self.assertEqual('test_logutil.py', os.path.basename(last[0]))
self.assertTrue(isinstance(last[1], int))
self.assertEqual('test_stack_list', last[2])
self.assertRegexpMatches(last[3], '^ *stack = ')
def test_format_stack(self):
cases = (
([('0', 1, 2, 3)], '0-1-2-3'),
([('0', 1, 2, 3),
('a', 'b', 'c', 'd')], '0-1-2-3\na-b-c-d'),
)
for inp, expected in cases:
rst = logutil.stack_format(
inp, fmt='{fn}-{ln}-{func}-{statement}', sep='\n')
self.assertEqual(expected, rst)
def test_stack_str(self):
rst = logutil.stack_str(fmt='{fn}-{ln}-{func}-{statement}', sep=' ')
self.assertRegexpMatches(
rst,
' test_logutil.py-\d+-test_stack_str- *rst = ')
def test_get_datefmt(self):
cases = (
(None, None),
('default', None),
('time', '%H:%M:%S'),
('%H%M%S', '%H%M%S'),
)
for inp, expected in cases:
rst = logutil.get_datefmt(inp)
self.assertEqual(expected, rst)
def test_get_fmt(self):
cases = (
(None,
'[%(asctime)s,%(process)d-%(thread)d,%(filename)s,%(lineno)d,%(levelname)s] %(message)s'),
('default',
'[%(asctime)s,%(process)d-%(thread)d,%(filename)s,%(lineno)d,%(levelname)s] %(message)s'),
('time_level', "[%(asctime)s,%(levelname)s] %(message)s"),
('message', '%(message)s'),
('%(message)s', '%(message)s'),
)
for inp, expected in cases:
rst = logutil.get_fmt(inp)
self.assertEqual(expected, rst)
def test_make_logger(self):
rm_file('/tmp/tt')
l = logutil.make_logger(base_dir='/tmp',
log_name='m',
log_fn='tt',
level='INFO',
fmt='%(message)s',
datefmt='%H%M%S'
)
l.debug('debug')
l.info('info')
cont = read_file('/tmp/tt').strip()
self.assertEqual(cont, 'info')
def test_make_logger_with_config(self):
code, out, err = subproc(
'python make_logger_with_config.py', cwd=os.path.dirname(__file__))
self.assertEqual(0, code)
self.assertEqual(out.strip(), 'info')
def test_make_formatter(self):
# how to test logging.Formatter?
pass
def test_make_file_handler(self):
rm_file('/tmp/handler_change')
l = logutil.make_logger(base_dir='/tmp',
log_name='h',
log_fn='dd',
level='INFO',
fmt='%(message)s',
datefmt='%H%M%S'
)
l.handlers = []
handler = logutil.make_file_handler(base_dir='/tmp',
log_fn='handler_change',
fmt='%(message)s',
datefmt='%H%M%S')
l.addHandler(handler)
l.debug('debug')
l.info('info')
cont = read_file('/tmp/handler_change').strip()
self.assertEqual(cont, 'info')
def test_make_file_handler_with_config(self):
code, out, err = subproc(
'python make_file_handler_with_config.py', cwd=os.path.dirname(__file__))
self.assertEqual(0, code)
self.assertEqual(out.strip(), 'info')
def test_add_std_handler(self):
rm_file('/tmp/stdlog')
code, out, err = subproc(
'python stdlog.py', cwd=os.path.dirname(__file__))
self.assertEqual(0, code)
self.assertEqual('error', out.strip())
def test_set_logger_level(self):
cases = (
(None, 'debug1\ndebug2'),
('1_prefix', 'debug1\ndebug2\ndebug2'),
(('1_prefix', '2_prefix'), 'debug1\ndebug2'),
(('not_exist',), 'debug1\ndebug2\ndebug1\ndebug2'),
(('not_exist', '1_prefix'), 'debug1\ndebug2\ndebug2'),
)
for inp, expected in cases:
rm_file('/tmp/ss')
logger1 = logutil.make_logger(base_dir='/tmp',
log_name='1_prefix_1',
log_fn='ss',
level='DEBUG',
fmt='%(message)s',
datefmt='%H%M%S')
logger2 = logutil.make_logger(base_dir='/tmp',
log_name='2_prefix_1',
log_fn='ss',
level='DEBUG',
fmt='%(message)s',
datefmt='%H%M%S')
logger1.debug('debug1')
logger2.debug('debug2')
logutil.set_logger_level(level='INFO', name_prefixes=inp)
logger1.debug('debug1')
logger2.debug('debug2')
content = read_file('/tmp/ss')
self.assertEqual(expected, content.strip())
|
test_acceptance.py
|
import unittest
import threading
import queue
import tempfile
import pathlib
from todo.app import TODOApp
from todo.db import BasicDB
class TestTODOAcceptance(unittest.TestCase):
def setUp(self):
self.inputs = queue.Queue()
self.outputs = queue.Queue()
self.fake_output = lambda txt: self.outputs.put(txt)
self.fake_input = lambda: self.inputs.get()
self.get_output = lambda: self.outputs.get(timeout=1)
self.send_input = lambda cmd: self.inputs.put(cmd)
def test_main(self):
app = TODOApp(io=(self.fake_input, self.fake_output))
app_thread = threading.Thread(target=app.run, daemon=True)
app_thread.start()
welcome = self.get_output()
self.assertEqual(welcome, (
"TODOs:\n"
"\n"
"\n"
"> "
))
self.send_input("add buy milk")
welcome = self.get_output()
self.assertEqual(welcome, (
"TODOs:\n"
"1. buy milk\n"
"\n"
"> "
))
self.send_input("add buy eggs")
welcome = self.get_output()
self.assertEqual(welcome, (
"TODOs:\n"
"1. buy milk\n"
"2. buy eggs\n"
"\n"
"> "
))
self.send_input("del 1")
welcome = self.get_output()
self.assertEqual(welcome, (
"TODOs:\n"
"1. buy eggs\n"
"\n"
"> "
))
self.send_input("quit")
app_thread.join(timeout=1)
self.assertEqual(self.get_output(), "bye!\n")
def test_persistence(self):
with tempfile.TemporaryDirectory() as tmpdirname:
app_thread = threading.Thread(
target=TODOApp(
io=(self.fake_input, self.fake_output),
dbmanager=BasicDB(pathlib.Path(tmpdirname, "db"))
).run,
daemon=True
)
app_thread.start()
# First time the app starts it's empty.
welcome = self.get_output()
self.assertEqual(welcome, (
"TODOs:\n"
"\n"
"\n"
"> "
))
self.send_input("add buy milk")
self.send_input("quit")
app_thread.join(timeout=1)
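            # Drain any output left over from the first run before restarting the app.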
while True:
try:
self.get_output()
except queue.Empty:
break
app_thread = threading.Thread(
target=TODOApp(
io=(self.fake_input, self.fake_output),
dbmanager=BasicDB(pathlib.Path(tmpdirname, "db"))
).run,
daemon=True
)
app_thread.start()
welcome = self.get_output()
self.assertEqual(welcome, (
"TODOs:\n"
"1. buy milk\n"
"\n"
"> "
))
self.send_input("quit")
app_thread.join(timeout=1)
|
blast_ex1.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from threading import Thread, Event
import time
# import threading
import psutil
import datetime
import os
import subprocess
# sizes = [0.015625, 0.03125, 0.0625, 0.125, 0.25, 0.5]
size = 0.015625
# command = "blastn -db nt -evalue 1e-05 -query arquivo.fasta -out arquivoblast"
#monitor cpu and memory
# [1:09 PM, 3/14/2016] Mauro: Monitor the machines' performance (whether they hit their CPU or memory ceiling; whether they hang)
# [1:10 PM, 3/14/2016] Mauro: And check the relationship (size, number of reads, number of hits) between the input and output files.
# Raony, I suggest:
# 1) Take one of the 'good' files and split it into different sizes: 50, 25, 12.5, 6.25, 3.125 and 1.5625% of the original
# 2) Run each one on a different webservice, on standard AWS instances with roughly 8, 20 and 50 GB of RAM and the corresponding compute.
# 3) Monitor: processing time on each instance, average CPU and RAM usage, output file size.
# 4) When splitting the initial file into pieces of 6.25% of the total, queue 8 of them (~50%) on the same webservice to monitor execution time and compare against a single 50% file.
file_prefix = str(size).replace('.','_')
output = open("monitor_%s.log" % (file_prefix), "w")
def monitor(arg1, stop_event):
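    # Sample CPU and memory roughly every 5 s (cpu_percent blocks for the interval)
    # and append a tab-separated line to the monitor log until stop_event is set.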
while(not stop_event.is_set()):
# time.sleep(60)
cpu = psutil.cpu_percent(interval=5)
mem = psutil.virtual_memory()
output_list = []
output_list.append("DATE:"+str(datetime.datetime.now()))
used = mem.total - mem.available
output_list.append("CPU:"+str(cpu))
output_list.append("MEMORY:"+str(int(used / 1024 / 1024))+" MB")
output.writelines("\t".join(output_list)+"\n")
print(output_list)
t2_stop = Event()
monitor_thread = Thread(target=monitor, args=(2, t2_stop))
monitor_thread.start()
#run blasts
# for size in sizes:
print("Running BLAST for %s \n" % (size))
output.writelines("Running BLAST for %s \n" % (size))
filename = "input_blast_%s.fasta" % (file_prefix)
command = "time /dados/raonygui/programs/ncbi-blast-2.3.0+/bin/blastn -db /dados/raonygui/blast/nt -evalue 1e-05 -query /dados/raonygui/blast_bench/input/%s -out blast_output_%s.fasta -num_threads 24" % (filename, file_prefix)
# command = "sleep 2"
out = subprocess.check_output(command.split())
print(out.decode("utf-8") )
output.write(out.decode("utf-8"))
#stop monitor
t2_stop.set()
monitor_thread.join()
output.close()
|
multipleprocess.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
class MultipleProcess(object):
@staticmethod
def process(func, *args):
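        # Run `func` once per node in args[0], in batches of 4 processes; each batch
        # is polled until every member exits or ~5 s pass, then the next batch starts.
        # Any leftover processes (fewer than 4) are started after the loop and waited
        # on by polling without a timeout.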
processes = []
        for node in args[0]:
            new_args = [args[0][node]] + list(args[1:])
            p = multiprocessing.Process(target=func, args=new_args)
processes.append(p)
if len(processes) == 4:
is_alive = True
for each in processes:
each.start()
begin = time.time()
while is_alive:
is_alive = False
for each in processes:
is_alive = is_alive or each.is_alive()
timeout = (time.time() - begin)
if timeout >= 5:
break
processes = []
for each in processes:
each.start()
is_alive = True
while is_alive:
is_alive = False
for each in processes:
is_alive = is_alive or each.is_alive()
|
test_tracer.py
|
# -*- coding: utf-8 -*-
"""
tests for Tracer and utilities.
"""
import contextlib
import multiprocessing
import os
from os import getpid
import threading
from unittest.case import SkipTest
import mock
import pytest
import six
import ddtrace
from ddtrace.constants import ENV_KEY
from ddtrace.constants import HOSTNAME_KEY
from ddtrace.constants import MANUAL_DROP_KEY
from ddtrace.constants import MANUAL_KEEP_KEY
from ddtrace.constants import ORIGIN_KEY
from ddtrace.constants import SAMPLING_PRIORITY_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.context import Context
from ddtrace.ext import priority
from ddtrace.ext import system
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.settings import Config
from ddtrace.tracer import Tracer
from ddtrace.tracer import _has_aws_lambda_agent_extension
from ddtrace.tracer import _in_aws_lambda
from tests.subprocesstest import run_in_subprocess
from tests.utils import TracerTestCase
from tests.utils import override_global_config
from ..utils import override_env
class TracerTestCases(TracerTestCase):
def test_tracer_vars(self):
span = self.trace("a", service="s", resource="r", span_type="t")
span.assert_matches(name="a", service="s", resource="r", span_type="t")
# DEV: Finish to ensure we don't leak `service` between spans
span.finish()
span = self.trace("a")
span.assert_matches(name="a", service=None, resource="a", span_type=None)
span.finish()
def test_tracer(self):
def _mix():
with self.trace("cake.mix"):
pass
def _bake():
with self.trace("cake.bake"):
pass
def _make_cake():
with self.trace("cake.make") as span:
span.service = "baker"
span.resource = "cake"
_mix()
_bake()
# let's run it and make sure all is well.
self.assert_has_no_spans()
_make_cake()
# Capture root's trace id to assert later
root_trace_id = self.get_root_span().trace_id
# Assert structure of this trace
self.assert_structure(
# Root span with 2 children
dict(name="cake.make", resource="cake", service="baker", parent_id=None),
(
# Span with no children
dict(name="cake.mix", resource="cake.mix", service="baker"),
# Span with no children
dict(name="cake.bake", resource="cake.bake", service="baker"),
),
)
# do it again and make sure it has new trace ids
self.reset()
_make_cake()
self.assert_span_count(3)
for s in self.spans:
assert s.trace_id != root_trace_id
def test_tracer_wrap(self):
@self.tracer.wrap("decorated_function", service="s", resource="r", span_type="t")
def f(tag_name, tag_value):
# make sure we can still set tags
span = self.tracer.current_span()
span.set_tag(tag_name, tag_value)
f("a", "b")
self.assert_span_count(1)
span = self.get_root_span()
span.assert_matches(
name="decorated_function",
service="s",
resource="r",
span_type="t",
meta=dict(a="b"),
)
def test_tracer_pid(self):
with self.trace("root") as root_span:
with self.trace("child") as child_span:
pass
# Root span should contain the pid of the current process
root_span.assert_metrics({system.PID: getpid()}, exact=False)
# Child span should not contain a pid tag
child_span.assert_metrics(dict(), exact=True)
def test_tracer_wrap_default_name(self):
@self.tracer.wrap()
def f():
pass
f()
self.assert_structure(dict(name="tests.tracer.test_tracer.f"))
def test_tracer_wrap_exception(self):
@self.tracer.wrap()
def f():
raise Exception("bim")
with self.assertRaises(Exception) as ex:
f()
self.assert_structure(
dict(
name="tests.test_tracer.f",
error=1,
meta={
"error.msg": ex.message,
"error.type": ex.__class__.__name__,
},
),
)
def test_tracer_wrap_multiple_calls(self):
@self.tracer.wrap()
def f():
pass
f()
f()
self.assert_span_count(2)
assert self.spans[0].span_id != self.spans[1].span_id
def test_tracer_wrap_span_nesting_current_root_span(self):
@self.tracer.wrap("inner")
def inner():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
@self.tracer.wrap("outer")
def outer():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
with self.trace("mid"):
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
inner()
outer()
def test_tracer_wrap_span_nesting(self):
@self.tracer.wrap("inner")
def inner():
pass
@self.tracer.wrap("outer")
def outer():
with self.trace("mid"):
inner()
outer()
self.assert_span_count(3)
self.assert_structure(
dict(name="outer"),
((dict(name="mid"), (dict(name="inner"),)),),
)
def test_tracer_wrap_class(self):
class Foo(object):
@staticmethod
@self.tracer.wrap()
def s():
return 1
@classmethod
@self.tracer.wrap()
def c(cls):
return 2
@self.tracer.wrap()
def i(cls):
return 3
f = Foo()
self.assertEqual(f.s(), 1)
self.assertEqual(f.c(), 2)
self.assertEqual(f.i(), 3)
self.assert_span_count(3)
self.spans[0].assert_matches(name="tests.tracer.test_tracer.s")
self.spans[1].assert_matches(name="tests.tracer.test_tracer.c")
self.spans[2].assert_matches(name="tests.tracer.test_tracer.i")
def test_tracer_wrap_factory(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
wrapped_function(42, kw_param=42)
self.assert_span_count(1)
self.spans[0].assert_matches(
name="wrap.overwrite",
meta=dict(args="(42,)", kwargs="{'kw_param': 42}"),
)
def test_tracer_wrap_factory_nested(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
with self.trace("wrap.parent", service="webserver"):
wrapped_function(42, kw_param=42)
self.assert_structure(
dict(name="wrap.parent", service="webserver"),
(dict(name="wrap.overwrite", service="webserver", meta=dict(args="(42,)", kwargs="{'kw_param': 42}")),),
)
def test_tracer_disabled(self):
self.tracer.enabled = True
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_spans()
self.reset()
self.tracer.enabled = False
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_no_spans()
def test_unserializable_span_with_finish(self):
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
# a weird case where manually calling finish with an unserializable
# span was causing a loop of serialization.
with self.trace("parent") as span:
span.metrics["as"] = np.int64(1) # circumvent the data checks
span.finish()
def test_tracer_disabled_mem_leak(self):
# ensure that if the tracer is disabled, we still remove things from the
# span buffer upon finishing.
self.tracer.enabled = False
s1 = self.trace("foo")
s1.finish()
p1 = self.tracer.current_span()
s2 = self.trace("bar")
self.assertIsNone(s2._parent)
s2.finish()
self.assertIsNone(p1)
def test_tracer_global_tags(self):
s1 = self.trace("brie")
s1.finish()
self.assertIsNone(s1.get_tag("env"))
self.assertIsNone(s1.get_tag("other"))
self.tracer.set_tags({"env": "prod"})
s2 = self.trace("camembert")
s2.finish()
self.assertEqual(s2.get_tag("env"), "prod")
self.assertIsNone(s2.get_tag("other"))
self.tracer.set_tags({"env": "staging", "other": "tag"})
s3 = self.trace("gruyere")
s3.finish()
self.assertEqual(s3.get_tag("env"), "staging")
self.assertEqual(s3.get_tag("other"), "tag")
def test_global_context(self):
# the tracer uses a global thread-local Context
span = self.trace("fake_span")
ctx = self.tracer.get_call_context()
assert ctx.trace_id == span.trace_id
assert ctx.span_id == span.span_id
def test_tracer_current_span(self):
# the current span is in the local Context()
span = self.trace("fake_span")
assert self.tracer.current_span() == span
span.finish()
with self.trace("fake_span") as span:
assert self.tracer.current_span() == span
def test_tracer_current_span_missing_context(self):
self.assertIsNone(self.tracer.current_span())
def test_tracer_current_root_span_missing_context(self):
self.assertIsNone(self.tracer.current_root_span())
def test_default_provider_get(self):
# Tracer Context Provider must return a Context object
# even if empty
ctx = self.tracer.context_provider.active()
assert isinstance(ctx, Context)
def test_default_provider_set(self):
# The Context Provider can set the current active Context;
# this could happen in distributed tracing
ctx = Context(trace_id=42, span_id=100)
self.tracer.context_provider.activate(ctx)
span = self.trace("web.request")
span.assert_matches(name="web.request", trace_id=42, parent_id=100)
def test_start_span(self):
# it should create a root Span
span = self.tracer.start_span("web.request")
assert span.name == "web.request"
assert span.parent_id is None
span.finish()
spans = self.pop_spans()
assert len(spans) == 1
assert spans[0] is span
def test_start_span_optional(self):
# it should create a root Span with arguments
with self.start_span("web.request", service="web", resource="/", span_type="http") as span:
pass
span.assert_matches(
name="web.request",
service="web",
resource="/",
span_type="http",
)
def test_start_span_service_default(self):
span = self.start_span("")
span.assert_matches(service=None)
span.finish()
def test_start_span_service_from_parent(self):
with self.start_span("parent", service="mysvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="mysvc",
)
def test_start_span_service_global_config(self):
# When no service is provided, fall back to the global config default
with self.override_global_config(dict(service="mysvc")):
with self.start_span("") as span:
span.assert_matches(service="mysvc")
def test_start_span_service_global_config_parent(self):
# Parent should have precedence over global config
with self.override_global_config(dict(service="mysvc")):
with self.start_span("parent", service="parentsvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="parentsvc",
)
def test_start_child_span(self):
# it should create a child Span for the given parent
with self.start_span("web.request") as parent:
assert self.tracer.current_span() is None
with self.start_span("web.worker", child_of=parent) as child:
assert self.tracer.current_span() is None
parent.assert_matches(
name="web.request",
parent_id=None,
_parent=None,
tracer=self.tracer,
)
child.assert_matches(
name="web.worker",
parent_id=parent.span_id,
_parent=parent,
tracer=self.tracer,
)
def test_start_child_span_attributes(self):
# it should create a child Span with parent's attributes
with self.start_span("web.request", service="web", resource="/", span_type="http") as parent:
with self.start_span("web.worker", child_of=parent) as child:
child.assert_matches(name="web.worker", service="web")
def test_start_child_from_context(self):
# it should create a child span with a populated Context
with self.start_span("web.request") as root:
with self.start_span("web.worker", child_of=root.context) as child:
pass
child.assert_matches(
name="web.worker",
parent_id=root.span_id,
trace_id=root.trace_id,
_parent=root,
tracer=self.tracer,
)
def test_adding_services(self):
assert self.tracer._services == set()
with self.start_span("root", service="one") as root:
assert self.tracer._services == set(["one"])
with self.start_span("child", service="two", child_of=root):
pass
assert self.tracer._services == set(["one", "two"])
def test_configure_dogstatsd_url_host_port(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
tracer = Tracer()
writer = AgentWriter("http://localhost:8126")
tracer.configure(writer=writer, dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
def test_configure_dogstatsd_url_socket(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
tracer = Tracer()
writer = AgentWriter("http://localhost:8126")
tracer.configure(writer=writer, dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
def test_tracer_url():
t = ddtrace.Tracer()
assert t.writer.agent_url == "http://localhost:8126"
t = ddtrace.Tracer(url="http://foobar:12")
assert t.writer.agent_url == "http://foobar:12"
t = ddtrace.Tracer(url="unix:///foobar")
assert t.writer.agent_url == "unix:///foobar"
t = ddtrace.Tracer(url="http://localhost")
assert t.writer.agent_url == "http://localhost"
t = ddtrace.Tracer(url="https://localhost")
assert t.writer.agent_url == "https://localhost"
with pytest.raises(ValueError) as e:
ddtrace.Tracer(url="foo://foobar:12")
assert (
str(e.value) == "Unsupported protocol 'foo' in Agent URL 'foo://foobar:12'. Must be one of: http, https, unix"
)
def test_tracer_shutdown_no_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
# The writer thread does not start until the first write.
t.shutdown()
assert t.writer.stop.called
assert not t.writer.join.called
# Do a write to start the writer.
with t.trace("something"):
pass
t.shutdown()
t.writer.stop.assert_has_calls(
[
mock.call(timeout=None),
mock.call(timeout=None),
]
)
def test_tracer_configure_writer_stop_unstarted():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Stop should be called when replacing the writer.
t.configure(hostname="localhost", port=8126)
assert orig_writer.stop.called
def test_tracer_configure_writer_stop_started():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Do a write to start the writer
with t.trace("something"):
pass
t.configure(hostname="localhost", port=8126)
orig_writer.stop.assert_called_once_with()
def test_tracer_shutdown_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
with t.trace("something"):
pass
t.shutdown(timeout=2)
t.writer.stop.assert_called_once_with(timeout=2)
def test_tracer_dogstatsd_url():
t = ddtrace.Tracer()
assert t.writer.dogstatsd.host == "localhost"
assert t.writer.dogstatsd.port == 8125
t = ddtrace.Tracer(dogstatsd_url="foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="udp://foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="/var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
t = ddtrace.Tracer(dogstatsd_url="unix:///var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
with pytest.raises(ValueError) as e:
t = ddtrace.Tracer(dogstatsd_url="foo://foobar:12")
assert str(e) == "Unknown url format for `foo://foobar:12`"
def test_tracer_fork():
t = ddtrace.Tracer()
original_pid = t._pid
original_writer = t.writer
@contextlib.contextmanager
def capture_failures(errors):
try:
yield
except AssertionError as e:
errors.put(e)
def task(t, errors):
# Start a new span to trigger process checking
with t.trace("test", service="test"):
# Assert we recreated the writer and have a new queue
with capture_failures(errors):
assert t._pid != original_pid
assert t.writer is not original_writer
assert t.writer._buffer is not original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 0
assert len(t.writer._buffer) == 1
# Assert tracer in a new process correctly recreates the writer
errors = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(t, errors))
try:
p.start()
finally:
p.join(timeout=2)
assert errors.empty(), errors.get()
# Ensure writing into the tracer in this process still works as expected
with t.trace("test", service="test"):
assert t._pid == original_pid
assert t.writer == original_writer
assert t.writer._buffer == original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 1
assert len(t.writer._buffer) == 1
def test_tracer_with_version():
t = ddtrace.Tracer()
# With global `config.version` defined
with override_global_config(dict(version="1.2.3")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "1.2.3"
# override manually
span.set_tag(VERSION_KEY, "4.5.6")
assert span.get_tag(VERSION_KEY) == "4.5.6"
# With no `config.version` defined
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) is None
# explicitly set in the span
span.set_tag(VERSION_KEY, "1.2.3")
assert span.get_tag(VERSION_KEY) == "1.2.3"
# With global tags set
t.set_tags({VERSION_KEY: "tags.version"})
with override_global_config(dict(version="config.version")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "config.version"
def test_tracer_with_env():
t = ddtrace.Tracer()
# With global `config.env` defined
with override_global_config(dict(env="prod")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "prod"
# override manually
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With no `config.env` defined
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) is None
# explicitly set in the span
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With global tags set
t.set_tags({ENV_KEY: "tags.env"})
with override_global_config(dict(env="config.env")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "config.env"
class EnvTracerTestCase(TracerTestCase):
"""Tracer test cases requiring environment variables."""
@run_in_subprocess(env_overrides=dict(DATADOG_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DATADOG_SERVICE_NAME(self):
"""
When DATADOG_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DD_SERVICE_NAME(self):
"""
When DD_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env(self):
with self.start_span("") as span:
pass
span.assert_matches(
service="mysvc",
)
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env_global_config(self):
# Global config should have higher precedence than the environment variable
with self.override_global_config(dict(service="overridesvc")):
with self.start_span("") as span:
pass
span.assert_matches(
service="overridesvc",
)
@run_in_subprocess(env_overrides=dict(DD_VERSION="0.1.2"))
def test_version_no_global_service(self):
# Version should be set if no service name is present
with self.trace("") as span:
span.assert_matches(
meta={
VERSION_KEY: "0.1.2",
},
)
# The version will not be tagged if the service is not globally
# configured.
with self.trace("root", service="rootsvc") as root:
assert VERSION_KEY not in root.meta
with self.trace("child") as span:
assert VERSION_KEY not in span.meta
@run_in_subprocess(env_overrides=dict(DD_SERVICE="django", DD_VERSION="0.1.2"))
def test_version_service(self):
# Fleshed out example of service and version tagging
# Our app is called django, we provide DD_SERVICE=django and DD_VERSION=0.1.2
with self.trace("django.request") as root:
# Root span should be tagged
assert root.service == "django"
assert VERSION_KEY in root.meta and root.meta[VERSION_KEY] == "0.1.2"
# Child spans should be tagged
with self.trace("") as child1:
assert child1.service == "django"
assert VERSION_KEY in child1.meta and child1.meta[VERSION_KEY] == "0.1.2"
# Version should not be applied to spans of a service that isn't user-defined
with self.trace("mysql.query", service="mysql") as span:
assert VERSION_KEY not in span.meta
# Child should also not have a version
with self.trace("") as child2:
assert child2.service == "mysql"
assert VERSION_KEY not in child2.meta
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agentless_env_with_lambda(self):
assert _in_aws_lambda()
assert not _has_aws_lambda_agent_extension()
tracer = Tracer()
assert isinstance(tracer.writer, LogWriter)
tracer.configure(enabled=True)
assert isinstance(tracer.writer, LogWriter)
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agent_config_with_lambda_extension(self):
def mock_os_path_exists(path):
return path == "/opt/extensions/datadog-agent"
assert _in_aws_lambda()
with mock.patch("os.path.exists", side_effect=mock_os_path_exists):
assert _has_aws_lambda_agent_extension()
tracer = Tracer()
assert isinstance(tracer.writer, AgentWriter)
assert tracer.writer._sync_mode
tracer.configure(enabled=False)
assert isinstance(tracer.writer, AgentWriter)
assert tracer.writer._sync_mode
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost"))
def test_detect_agent_config(self):
tracer = Tracer()
assert isinstance(tracer.writer, AgentWriter)
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2"))
def test_dd_tags(self):
assert self.tracer.tags["key1"] == "value1"
assert self.tracer.tags["key2"] == "value2"
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2,key3"))
def test_dd_tags_invalid(self):
assert "key1" in self.tracer.tags
assert "key2" in self.tracer.tags
assert "key3" not in self.tracer.tags
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "mysvc"
assert s.get_tag("env") == "myenv"
assert s.get_tag("version") == "myvers"
@run_in_subprocess(
env_overrides=dict(
DD_TAGS="service:s,env:e,version:v",
DD_ENV="env",
DD_SERVICE="svc",
DD_VERSION="0.123",
)
)
def test_tags_from_DD_TAGS_precedence(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "svc"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS_override(self):
t = ddtrace.Tracer()
ddtrace.config.env = "env"
ddtrace.config.service = "service"
ddtrace.config.version = "0.123"
with t.trace("test") as s:
assert s.service == "service"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
def test_tracer_set_runtime_tags():
t = ddtrace.Tracer()
with t.start_span("foobar") as span:
pass
assert len(span.get_tag("runtime-id"))
t2 = ddtrace.Tracer()
with t2.start_span("foobaz") as span2:
pass
assert span.get_tag("runtime-id") == span2.get_tag("runtime-id")
def test_tracer_runtime_tags_fork():
tracer = ddtrace.Tracer()
def task(tracer, q):
span = tracer.start_span("foobaz")
q.put(span.get_tag("runtime-id"))
span.finish()
span = tracer.start_span("foobar")
span.finish()
q = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children_tag = q.get()
assert children_tag != span.get_tag("runtime-id")
def test_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
span = t.start_span("hello")
assert span == result["span"]
span.finish()
def test_deregister_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
t.deregister_on_start_span(store_span)
with t.start_span("hello"):
pass
assert result == {}
def test_enable(monkeypatch):
t1 = ddtrace.Tracer()
assert t1.enabled
monkeypatch.setenv("DD_TRACE_ENABLED", "false")
t2 = ddtrace.Tracer()
assert not t2.enabled
def test_runtime_id_parent_only():
tracer = ddtrace.Tracer()
# Parent spans should have runtime-id
s = tracer.trace("test")
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
# Child spans should not
s2 = tracer.trace("test2")
assert s2.get_tag("runtime-id") is None
s2.finish()
s.finish()
# Parent spans should have runtime-id
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
def test_runtime_id_fork():
tracer = ddtrace.Tracer()
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
pid = os.fork()
if pid == 0:
# child
s = tracer.trace("test")
s.finish()
rtid_child = s.get_tag("runtime-id")
assert isinstance(rtid_child, six.string_types)
assert rtid != rtid_child
os._exit(12)
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
def test_multiple_tracer_ctx():
t1 = ddtrace.Tracer()
t2 = ddtrace.Tracer()
with t1.trace("") as s1:
with t2.trace("") as s2:
pass
assert s2.parent_id == s1.span_id
assert s2.trace_id == s1.trace_id
def test_filters(tracer, test_spans):
class FilterAll(object):
def process_trace(self, trace):
return None
tracer.configure(
settings={
"FILTERS": [FilterAll()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 0
class FilterMutate(object):
def __init__(self, key, value):
self.key = key
self.value = value
def process_trace(self, trace):
for s in trace:
s.set_tag(self.key, self.value)
return trace
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
s1, s2 = spans
assert s1.get_tag("boop") == "beep"
assert s2.get_tag("boop") == "beep"
# Test multiple filters
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
assert s.get_tag("mats") == "sundin"
class FilterBroken(object):
def process_trace(self, trace):
_ = 1 / 0
tracer.configure(
settings={
"FILTERS": [FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
def test_early_exit(tracer, test_spans):
s1 = tracer.trace("1")
s2 = tracer.trace("2")
s1.finish()
with mock.patch("ddtrace.context.log") as log:
s2.finish()
calls = [
mock.call("span %r closing after its parent %r, this is an error when not using async", s2, s1),
]
log.debug.assert_has_calls(calls)
assert s1.parent_id is None
assert s2.parent_id is s1.span_id
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
s1 = tracer.trace("1-1")
s1.finish()
assert s1.parent_id is None
s1 = tracer.trace("1-2")
s1.finish()
assert s1.parent_id is None
class TestPartialFlush(TracerTestCase):
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="5")
)
def test_partial_flush(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 5
assert [s.name for s in traces[0]] == ["child0", "child1", "child2", "child3", "child4"]
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="1")
)
def test_partial_flush_too_many(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 5
for t in traces:
assert len(t) == 1
assert [t[0].name for t in traces] == ["child0", "child1", "child2", "child3", "child4"]
for t in traces:
assert t[0].parent_id == root.span_id
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_too_few(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 0
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert [s.name for s in traces[0]] == ["root", "child0", "child1", "child2", "child3", "child4"]
def test_partial_flush_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5)
self.test_partial_flush()
def test_partial_flush_too_many_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=1)
self.test_partial_flush_too_many()
def test_partial_flush_too_few_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=6)
self.test_partial_flush_too_few()
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="false", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_configure_precedence(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5)
self.test_partial_flush()
def test_unicode_config_vals():
t = ddtrace.Tracer()
with override_global_config(dict(version=u"😇", env=u"😇")):
with t.trace("1"):
pass
t.shutdown()
def test_ctx(tracer, test_spans):
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s1.span_id
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s2.span_id
with tracer.trace("test3") as s3:
assert tracer.current_span() == s3
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s3.span_id
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s2.span_id
with tracer.trace("test4") as s4:
assert tracer.current_span() == s4
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s4.span_id
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert s1.parent_id is None
assert s2.parent_id == s1.span_id
assert s3.parent_id == s2.span_id
assert s4.parent_id == s1.span_id
assert s1.trace_id == s2.trace_id == s3.trace_id == s4.trace_id
assert s1.metrics[SAMPLING_PRIORITY_KEY] == 1
assert SAMPLING_PRIORITY_KEY not in s2.metrics
assert ORIGIN_KEY not in s1.meta
t = test_spans.pop_traces()
assert len(t) == 1
assert len(t[0]) == 4
_s1, _s2, _s3, _s4 = t[0]
assert s1 == _s1
assert s2 == _s2
assert s3 == _s3
assert s4 == _s4
with tracer.trace("s") as s:
assert s.parent_id is None
assert s.trace_id != s1.trace_id
def test_multithreaded(tracer, test_spans):
def target():
with tracer.trace("s1"):
with tracer.trace("s2"):
pass
with tracer.trace("s3"):
pass
for i in range(1000):
ts = [threading.Thread(target=target) for _ in range(10)]
for t in ts:
t.start()
for t in ts:
t.join()
traces = test_spans.pop_traces()
assert len(traces) == 10
for trace in traces:
assert len(trace) == 3
def test_ctx_distributed(tracer, test_spans):
# Test activating an invalid context.
ctx = Context(span_id=None, trace_id=None)
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s1.span_id
assert s1.parent_id is None
trace = test_spans.pop_traces()
assert len(trace) == 1
# Test activating a valid context.
ctx = Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s2
assert tracer.get_call_context().trace_id == s2.trace_id == 4321
assert tracer.get_call_context().span_id == s2.span_id
assert s2.parent_id == 1234
trace = test_spans.pop_traces()
assert len(trace) == 1
assert s2.metrics[SAMPLING_PRIORITY_KEY] == 2
assert s2.meta[ORIGIN_KEY] == "somewhere"
def test_manual_keep(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
def test_manual_keep_then_drop(tracer, test_spans):
# Test changing the value before finish.
with tracer.trace("asdf") as root:
with tracer.trace("child") as child:
child.set_tag(MANUAL_KEEP_KEY)
root.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
def test_manual_drop(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_enabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=True)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) == "test-hostname"
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_disabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_default(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
def test_service_mapping():
@contextlib.contextmanager
def override_service_mapping(service_mapping):
with override_env(dict(DD_SERVICE_MAPPING=service_mapping)):
assert ddtrace.config.service_mapping == {}
ddtrace.config.service_mapping = Config().service_mapping
yield
ddtrace.config.service_mapping = {}
# Test single mapping
with override_service_mapping("foo:bar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "bar"
# Test multiple mappings
with override_service_mapping("foo:bar,sna:fu"), ddtrace.Tracer().trace("renaming", service="sna") as span:
assert span.service == "fu"
# Test colliding mappings
with override_service_mapping("foo:bar,foo:foobar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "foobar"
# Test invalid service mapping
with override_service_mapping("foo;bar,sna:fu"):
with ddtrace.Tracer().trace("passthru", service="foo") as _:
assert _.service == "foo"
with ddtrace.Tracer().trace("renaming", "sna") as _:
assert _.service == "fu"
def test_configure_url_partial():
tracer = ddtrace.Tracer()
tracer.configure(hostname="abc")
assert tracer.writer.agent_url == "http://abc:8126"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer = ddtrace.Tracer(url="http://abc")
assert tracer.writer.agent_url == "http://abc"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer.configure(port=431)
assert tracer.writer.agent_url == "http://abc:431"
def test_bad_agent_url(monkeypatch):
with pytest.raises(ValueError):
Tracer(url="bad://localhost:8126")
monkeypatch.setenv("DD_TRACE_AGENT_URL", "bad://localhost:1234")
with pytest.raises(ValueError) as e:
Tracer()
assert (
str(e.value)
== "Unsupported protocol 'bad' in Agent URL 'bad://localhost:1234'. Must be one of: http, https, unix"
)
monkeypatch.setenv("DD_TRACE_AGENT_URL", "unix://")
with pytest.raises(ValueError) as e:
Tracer()
assert str(e.value) == "Invalid file path in Agent URL 'unix://'"
monkeypatch.setenv("DD_TRACE_AGENT_URL", "http://")
with pytest.raises(ValueError) as e:
Tracer()
assert str(e.value) == "Invalid hostname in Agent URL 'http://'"
def test_context_priority(tracer, test_spans):
"""Assigning a sampling_priority should not affect if the trace is sent to the agent"""
for p in [priority.USER_REJECT, priority.AUTO_REJECT, priority.AUTO_KEEP, priority.USER_KEEP, None, 999]:
with tracer.trace("span_%s" % p) as span:
span.context.sampling_priority = p
# Spans should always be written regardless of sampling priority since
# the agent needs to know the sampling decision.
spans = test_spans.pop()
assert len(spans) == 1, "trace should be sampled"
if p in [priority.USER_REJECT, priority.AUTO_REJECT, priority.AUTO_KEEP, priority.USER_KEEP]:
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == p
def test_spans_sampled_out(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
spans = test_spans.pop()
assert len(spans) == 0
def test_spans_sampled_one(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = True
spans = test_spans.pop()
assert len(spans) == 3
def test_spans_sampled_all(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = True
with tracer.trace("child") as span:
span.sampled = True
with tracer.trace("child") as span:
span.sampled = True
spans = test_spans.pop()
assert len(spans) == 3
|
command.py
|
import signal
import subprocess
import threading
# map signal numbers to names (e.g. 9 -> 'SIGKILL'); when aliases share a number,
# the reversed sort makes the alphabetically first name win
SIGNALS = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
if v.startswith('SIG') and not v.startswith('SIG_'))
class Command:
DEFAULT_ENV = {}
DEFAULT_TIMEOUT = 600.
DEFAULT_STDIN = subprocess.PIPE
DEFAULT_STDOUT = subprocess.PIPE
DEFAULT_STDERR = subprocess.PIPE
def __init__(self, cmd, cwd, shell=True, env=DEFAULT_ENV,
stdin=DEFAULT_STDIN, stdout=DEFAULT_STDOUT,
stderr=DEFAULT_STDERR):
self.cmd = cmd
self.cwd = cwd
self.env = env
self.ps = None
self.shell = shell
self.stderr = stderr
self.stdin = stdin
self.stdout = stdout
def open(self):
return subprocess.Popen(self.cmd, shell=self.shell,
env=self.env, cwd=self.cwd,
stdin=self.stdin, stdout=self.stdout,
stderr=self.stderr)
def run(self, instr=None, timeout=DEFAULT_TIMEOUT):
def target():
self.ps = subprocess.Popen(self.cmd, shell=self.shell,
env=self.env, cwd=self.cwd,
stdin=self.stdin, stdout=self.stdout,
stderr=self.stderr)
self.out, self.err = self.ps.communicate(input=instr)
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
# the command outlived the timeout: terminate it and wait for communicate() to return
self.ps.terminate()
thread.join()
if self.ps.returncode < 0:
# Popen reports death by signal N as a return code of -N
sig = SIGNALS[-self.ps.returncode]
elif self.ps.returncode == 126:
sig = 'Command invoked cannot execute'
elif self.ps.returncode == 127:
sig = 'Command not found'
elif self.ps.returncode == 128:
sig = 'Invalid argument to exit'
elif self.ps.returncode - 128 in SIGNALS:
sig = SIGNALS[self.ps.returncode - 128]
else:
sig = self.ps.returncode
return sig, self.out.decode('utf-8'), self.err.decode('utf-8')
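# A minimal usage sketch (hypothetical command; assumes a POSIX shell is available):
if __name__ == '__main__':
    cmd = Command("echo hello", cwd=".")
    status, out, err = cmd.run(timeout=5.)
    print(status, out.strip(), err.strip())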
|
gil_demo.py
|
"""
@file: gil_demo.py
@author: magician
@date: 2019/7/23
"""
import dis
import sys
import threading
from threading import Thread
from python_core.decorator_demo import log_execution_time
def count_down(n):
"""
count_down
:param n:
:return:
"""
while n > 0:
n -= 1
@log_execution_time
def main():
"""
main
:return:
"""
n = 100000000
t1 = Thread(target=count_down, args=[n // 2])
t2 = Thread(target=count_down, args=[n // 2])
t1.start()
t2.start()
t1.join()
t2.join()
def count_variable():
"""
count_variable
:return:
"""
a = []
b = a
# getrefcount() reports at least 3 here: a, b, and the temporary reference held by the call itself
print(sys.getrefcount(a))
n = 0
lock = threading.Lock()
def foo():
"""
foo
:return:
"""
global n
with lock:
n += 1
def count():
"""
count
:return:
"""
threads = []
for i in range(100):
t = threading.Thread(target=foo)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print(n)
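# A minimal timing sketch (standard library only; the numbers are illustrative) contrasting a
# sequential countdown with the two-thread version used in main(): because CPython's GIL lets
# only one thread execute bytecode at a time, the threaded run is not faster for this
# CPU-bound loop.
def gil_timing_demo(n=10_000_000):
    import time
    start = time.perf_counter()
    count_down(n)
    sequential = time.perf_counter() - start

    start = time.perf_counter()
    t1 = Thread(target=count_down, args=[n // 2])
    t2 = Thread(target=count_down, args=[n // 2])
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    threaded = time.perf_counter() - start
    print("sequential: %.2fs  threaded: %.2fs" % (sequential, threaded))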
if __name__ == '__main__':
# main()
count_variable()
count()
dis.dis(foo)
|
demo.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import sys
import time
from itertools import product as itr_prod
from threading import Thread
import cv2
import numpy as np
# HACK: cross py2-py3 compatible version
try:
from queue import Queue
except ImportError:
from Queue import Queue
COLORS = [tuple(p) for p in itr_prod([0, 180, 255], repeat=3)]
COLORS = COLORS[1:]
def ltwh_to__tblr(ltwh):
l, t, w, h = ltwh.tolist()
b = int(t + h)
r = int(l + w)
return t, b, l, r
def add_fps(orig, fps):
f_p_s_text = "FPS: {:.1f}".format(fps)
text_color = (255, 144, 30)
orig_h, orig_w = orig.shape[:2]
cv2.putText(orig, f_p_s_text, (10, orig_h - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)
return orig
def check_range(upper, lower, checked_val):
if upper < checked_val:
checked_val = upper
elif lower > checked_val:
checked_val = lower
return checked_val
def add_rectangle(classes, orig, preds, pred_shape):
orig_h, orig_w = orig.shape[:2]
locs = [pred[:, 0:4] for pred in preds]
labels_n = np.array([pred[:, 4] for pred in preds]).astype(np.int) # TODO magic-number
labels_n = labels_n.flatten()
labels = [classes[i_label] for i_label in labels_n]
scores = preds[0][:, 5]
pred_h, pred_w = pred_shape
w_scale = orig_w / pred_w
h_scale = orig_h / pred_h
locs = (np.array(locs).reshape((-1, 4)) * [w_scale, h_scale, w_scale, h_scale]).astype(int)
for idx, loc in enumerate(locs):
t, b, le, r = ltwh_to__tblr(loc)
le = check_range(orig_w, 0, le)
r = check_range(orig_w, 0, r)
t = check_range(orig_h, 0, t)
b = check_range(orig_h, 0, b)
color_r = COLORS[labels_n[idx] % len(COLORS)]
thick = 2
label_text = "{} : {:.1f}%".format(labels[idx], scores[idx] * 100)
label_size, baseline = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
cv2.rectangle(orig, (le, t), (r, b), color_r, thick)
max_color = max(color_r)
text_color = (255, 255, 255) if max_color < 255 else (0, 0, 0)
cv2_filed_config = cv2.cv.CV_FILLED if hasattr(cv2, 'cv') else cv2.FILLED
cv2.rectangle(orig, (le, t), (le + label_size[0], t + label_size[1]), color_r, cv2_filed_config)
cv2.putText(orig, label_text, (le, t + label_size[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
return orig
class VideoStream:
def __init__(self, video_source, video_width, video_height, video_fps, queue_size=1):
self.video_fps = video_fps
vc = cv2.VideoCapture(video_source)
if hasattr(cv2, 'cv'):
vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, video_width)
vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, video_height)
vc.set(cv2.cv.CV_CAP_PROP_FPS, video_fps)
else:
vc.set(cv2.CAP_PROP_FRAME_WIDTH, video_width)
vc.set(cv2.CAP_PROP_FRAME_HEIGHT, video_height)
vc.set(cv2.CAP_PROP_FPS, video_fps)
self.stream = vc
self.stopped = False
self.queue = Queue(maxsize=queue_size)
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
def update(self):
while True:
if self.stopped:
break
(flg, frame) = self.stream.read()
if not flg:
raise Exception("Video capture is wrong")
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.queue.full():
time.sleep(1/float(self.video_fps))
else:
if not self.queue.empty():
self.queue.get()
self.queue.put(frame)
else:
self.queue.put(frame)
self.stream.release()
def read(self):
return self.queue.get()
def release(self):
self.stopped = True
self.thread.join()
def run_inference(image, nn, pre_process, post_process):
if sys.version_info.major == 2:
get_time = time.time
else:
get_time = time.perf_counter
start = get_time()
data = pre_process(image=image)["image"]
data = np.expand_dims(data, axis=0)
network_only_start = get_time()
result = nn.run(data)
fps_only_network = 1.0 / (get_time() - network_only_start)
output = post_process(outputs=result)['outputs']
fps = 1.0 / (get_time() - start)
return output, fps, fps_only_network
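# A minimal wiring sketch (the nn, pre_process and post_process callables are hypothetical
# placeholders supplied by the caller; not part of the original demo) showing how VideoStream,
# run_inference and add_fps fit together:
def demo_loop(nn, pre_process, post_process, video_source=0):
    stream = VideoStream(video_source, 640, 480, 30)
    try:
        while True:
            frame = stream.read()  # frames are delivered in RGB by VideoStream.update()
            output, fps, _ = run_inference(frame, nn, pre_process, post_process)
            # a full demo would draw `output` with add_rectangle(); here we only show the FPS
            shown = add_fps(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), fps)
            cv2.imshow("demo", shown)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        stream.release()
        cv2.destroyAllWindows()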
|
pocas.py
|
# POSCO Youth AI & Bigdata Academy, Class A, Team 4
# Online exam cheating prevention system
############POSCAS############
# https://github.com/JoyLeeA
# Detailed explanation available at the repository above
#### FOR FACE IDENTIFY ####
import numpy as np
from keras.models import load_model
from mtcnn.mtcnn import MTCNN
from PIL import Image
from sklearn.svm import SVC
from faceidentify.SVMclassifier import model as svm
from faceidentify.SVMclassifier import out_encoder
#### FOR GAZE AND MOTION ####
import argparse
import cv2
import os.path as osp
from detectheadposition import headpose
from gaze_tracking import GazeTracking
#### FOR WARNING ####
import pygame # For play Sound
import time # For sleep
import threading #For multi thread
# from tkinter import *
# import tkinter.messagebox
# def Msgbox1():
# tkinter.messagebox.showwarning("경고", "집중하세요")
# Warning Sound
def Sound():
pygame.mixer.init()
music = pygame.mixer.Sound("Warning/warning.wav")
music.play()
time.sleep(5)
# Decide whether the examinee cheated: flag if the red-card count reaches a third of the exam minutes
def Fail(timee, redcard):
if redcard >= timee/3:
print("===부정행위자 입니다===")
# get the face embedding for one face
def get_embedding(model, face_pixels):
# scale pixel values
face_pixels = face_pixels.astype('float32')
# standardize pixel values across channels (global)
mean, std = face_pixels.mean(), face_pixels.std()
face_pixels = (face_pixels - mean) / std
#print(face_pixels.shape)
# transform face into one sample
#expand dims adds a new dimension to the tensor
samples = np.expand_dims(face_pixels, axis=0)
#print(samples.shape)
# make prediction to get embedding
yhat = model.predict(samples)
return yhat[0]
# Print Result
def PrintResult(x, y):
print("###############--RESULT--#################")
print("yellocard:", x, "/ redcard", y)
print("###########################################")
# point can't get negative
def notnegative(x):
if x < 0:
return 0
else:
return x
# main function
def main(args):
filename = args["input_file"]
faceCascade = cv2.CascadeClassifier('models/haarcascade_frontalface_default.xml')
model = load_model('models/facenet_keras.h5')
if filename is None:
isVideo = False
webcam = cv2.VideoCapture(0)
webcam.set(3, args['wh'][0])
webcam.set(4, args['wh'][1])
else:
isVideo = True
webcam = cv2.VideoCapture(filename)
fps = webcam.get(cv2.CAP_PROP_FPS)
width = int(webcam.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(webcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
name, ext = osp.splitext(filename)
out = cv2.VideoWriter(args["output_file"], fourcc, fps, (width, height))
# Variable Setting
hpd = headpose.HeadposeDetection(args["landmark_type"], args["landmark_predictor"]) #import headpose
gaze = GazeTracking() # import gazetracking
yellocard = 0
redcard = 0
tempval = 0
timee = int(input("시험 시간을 입력하세요(Minute): ")) # Ask for the exam duration in minutes, used to limit the test time
max_time_end = time.time() + (60 * timee)
# Infinity Loop for Detect Cheating for Online test
while(webcam.isOpened()):
ret, frame = webcam.read() # Read a frame from the webcam
gaze.refresh(frame)
frame = gaze.annotated_frame() # Mark pupil for frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=3,
minSize=(30, 30),
flags= cv2.CASCADE_SCALE_IMAGE) # face structure
# Get point from pupil
if gaze.is_blinking():
yellocard = yellocard - 1
yellocard = notnegative(yellocard)
elif gaze.is_right():
yellocard = yellocard - 1
yellocard = notnegative(yellocard)
elif gaze.is_left():
yellocard = yellocard - 1
yellocard = notnegative(yellocard)
elif gaze.is_center():
yellocard = yellocard - 1
yellocard = notnegative(yellocard)
else:
yellocard = yellocard + 2
# Convert accumulated yellow cards into a red card
if yellocard > 50:
yellocard = 0
tempval = tempval + 1
redcard = redcard + 1
# On each new red card, give an audible and on-screen text warning (repeats)
if tempval == 1:
text1 = "WARNING"
cv2.putText(frame, text1, (10, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6,(0, 0, 255),2)
my_thread = threading.Thread(target = Sound)
my_thread.start()
tempval = 0
# If you are not in a GPU environment, comment this block out (between the dashed markers) --------------
# On the second red card, show a picture warning (once)
if redcard == 2:
warn_img = cv2.imread("Warning/warning.png", cv2.IMREAD_COLOR)
cv2.imshow('Warning',warn_img)
cv2.waitKey(1)
redcard=2.1
# -----------------------------------------------------------------------
# Continuously log the current suspicion level and warning count
print("<< *의심수준:" , yellocard," || ", "*경고횟수:", redcard, " >>")
#Detect head position
if isVideo:
frame, angles = hpd.process_image(frame)
if frame is None:
break
else:
out.write(frame)
else:
frame, angles = hpd.process_image(frame)
if angles is None :
pass
else : #angles = [x,y,z] , get point from headposition
if angles[0]>15 or angles[0] <-15 or angles[1]>15 or angles[1] <-15 or angles[2]>15 or angles[2] <-15:
yellocard = yellocard + 2
else:
yellocard = yellocard - 1
yellocard = notnegative(yellocard)
yellocard = yellocard + hpd.yello(frame)
if yellocard <0:
yellocard = notnegative(yellocard)
# Draw a rectangle around the faces and predict the face name
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2) # take the face pixels from the frame
crop_frame = frame[y:y+h, x:x+w] # turn the face pixels back into an image
new_crop = Image.fromarray(crop_frame) # resize the image to meet the size requirment of facenet
new_crop = new_crop.resize((160, 160)) # turn the image back into a tensor
crop_frame = np.asarray(new_crop) # get the face embedding using the face net model
face_embed = get_embedding(model, crop_frame) # it is a 1d array need to reshape it as a 2d tensor for svm
face_embed = face_embed.reshape(-1, face_embed.shape[0]) # predict using our SVM model
pred = svm.predict(face_embed) # get the prediction probabiltiy
pred_prob = svm.predict_proba(face_embed) # pred_prob has probabilities of each class
# get name
class_index = pred[0]
class_probability = pred_prob[0,class_index] * 100
predict_names = out_encoder.inverse_transform(pred)
text = '%s (%.3f%%)' % (predict_names[0], class_probability)
#add the name to frame but only if the pred is above a certain threshold
if (class_probability > 70):
cv2.putText(frame, text, (x, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# Display the resulting frame
cv2.imshow('POCAS', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
print("관리자에 의해 시험이 강제 종료 되었습니다")
PrintResult(yellocard, redcard)
Fail(timee, redcard)
break
elif time.time() > max_time_end:
print(timee, "분의 시험이 종료되었습니다.")
PrintResult(yellocard, redcard)
Fail(timee, redcard)
break
# When everything done, release the webcam
webcam.release()
if isVideo:
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', metavar='FILE', dest='input_file', default=None, help='Input video. If not given, web camera will be used.')
parser.add_argument('-o', metavar='FILE', dest='output_file', default=None, help='Output video.')
parser.add_argument('-wh', metavar='N', dest='wh', default=[720, 480], nargs=2, help='Frame size.')
parser.add_argument('-lt', metavar='N', dest='landmark_type', type=int, default=1, help='Landmark type.')
parser.add_argument('-lp', metavar='FILE', dest='landmark_predictor', default='gaze_tracking/trained_models/shape_predictor_68_face_landmarks.dat', help="Landmark predictor data file.")
args = vars(parser.parse_args())
main(args)
|
tui.py
|
# MIT License
# Copyright (C) Michael Tao-Yi Lee (taoyil AT UCI EDU)
from multiprocessing import Process, Pipe
def tui_main(addr=None, ch1=True, ch2=True):
from uci_cbp_demo.ui.terminal import TerminalManager
from uci_cbp_demo.backend import SensorBoard
pipe_1, pipe_2 = Pipe()
tm = TerminalManager(pipe_1)
sensor = SensorBoard(addr=addr, pipe=pipe_2)
process = Process(target=sensor.start_session)
process.start()
if ch1:
pipe_1.send(("CH1", None))
if ch2:
pipe_1.send(("CH2", None))
tm.handle_session()
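# A minimal sketch of the same Process + Pipe pattern with a stand-in worker (hypothetical;
# not part of uci_cbp_demo). tui_main() above sends ("CH1", None)-style tuples toward the
# sensor process in the same way.
def _echo_worker(conn):
    while True:
        msg = conn.recv()
        if msg == ("STOP", None):
            break
        conn.send(msg)

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    worker = Process(target=_echo_worker, args=(child_conn,))
    worker.start()
    parent_conn.send(("CH1", None))
    print(parent_conn.recv())  # echoes ("CH1", None)
    parent_conn.send(("STOP", None))
    worker.join()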
|
ansible_handler_processor.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
import concurrent.futures
import logging
import multiprocessing
import os
import threading
import time
import traceback
from fabric_cf.actor.core.common.constants import Constants
from fabric_cf.actor.core.common.exceptions import AuthorityException
from fabric_cf.actor.core.plugins.handlers.config_token import ConfigToken
from fabric_cf.actor.core.plugins.handlers.handler_processor import HandlerProcessor
from fabric_cf.actor.core.util.log_helper import LogHelper
from fabric_cf.actor.core.util.reflection_utils import ReflectionUtils
process_pool_logger = None
class AnsibleHandlerProcessor(HandlerProcessor):
"""
Ansible Handler Processor
"""
MAX_WORKERS = 10
def __init__(self):
super().__init__()
from fabric_cf.actor.core.container.globals import GlobalsSingleton
self.log_config = GlobalsSingleton.get().get_log_config()
self.executor = None
self.process_pool_manager = None
self.process_pool_lock = None
self.__setup_process_pool()
self.futures = []
self.thread = None
self.future_lock = threading.Condition()
self.stopped = False
def __getstate__(self):
# drop attributes that cannot be pickled (logger, locks, executor, futures, thread) before
# the object is copied into worker processes; __setstate__ recreates them
state = self.__dict__.copy()
del state['logger']
del state['plugin']
del state['initialized']
del state['lock']
del state['executor']
del state['process_pool_manager']
del state['process_pool_lock']
del state['futures']
del state['thread']
del state['future_lock']
del state['stopped']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.logger = None
self.plugin = None
self.initialized = False
self.lock = threading.Lock()
self.__setup_process_pool()
self.futures = []
self.thread = None
self.future_lock = threading.Condition()
self.stopped = False
def __setup_process_pool(self):
log_dir = self.log_config.get(Constants.PROPERTY_CONF_LOG_DIRECTORY, ".")
log_file = self.log_config.get(Constants.PROPERTY_CONF_HANDLER_LOG_FILE, "handler.log")
log_level = self.log_config.get(Constants.PROPERTY_CONF_LOG_LEVEL, logging.DEBUG)
log_retain = int(self.log_config.get(Constants.PROPERTY_CONF_LOG_RETAIN, 50))
log_size = int(self.log_config.get(Constants.PROPERTY_CONF_LOG_SIZE, 5000000))
logger = self.log_config.get(Constants.PROPERTY_CONF_LOGGER, "handler")
logger = f"{logger}-handler"
self.executor = concurrent.futures.ProcessPoolExecutor(max_workers=self.MAX_WORKERS,
initializer=AnsibleHandlerProcessor.process_pool_initializer,
initargs=(log_dir, log_file, log_level, log_retain,
log_size, logger))
self.process_pool_manager = multiprocessing.Manager()
self.process_pool_lock = self.process_pool_manager.Lock()
def start(self):
"""
Start the Future Processor Thread
"""
self.thread = threading.Thread(target=self.process_futures)
        self.thread.name = "FutureProcessor"
        self.thread.daemon = True
self.thread.start()
def shutdown(self):
"""
Shutdown Process Pool, Future Processor
"""
try:
self.lock.acquire()
self.executor.shutdown(wait=True)
self.stopped = True
for f, u in self.futures:
f.cancel()
temp = self.thread
self.thread = None
if temp is not None:
self.logger.warning("It seems that the future thread is running. Interrupting it")
try:
with self.future_lock:
self.future_lock.notify_all()
temp.join()
except Exception as e:
self.logger.error("Could not join future thread {}".format(e))
self.logger.error(traceback.format_exc())
finally:
self.lock.release()
except Exception as e:
self.logger.error(f"Exception occurred {e}")
self.logger.error(traceback.format_exc())
finally:
if self.lock.locked():
self.lock.release()
def set_logger(self, *, logger):
self.logger = logger
def queue_future(self, future: concurrent.futures.Future, unit: ConfigToken):
"""
        Queue a future on the Future Processor queue
        @param future future to be monitored for completion
        @param unit Unit being processed; needed to report the handler result back when the future completes
"""
with self.future_lock:
self.futures.append((future, unit))
self.logger.debug("Added future to Future queue")
self.future_lock.notify_all()
def invoke_handler(self, unit: ConfigToken, operation: str):
try:
handler = self.config_mappings.get(str(unit.get_resource_type()), None)
if handler is None:
raise AuthorityException(f"No handler found for resource type {unit.get_resource_type()}")
future = self.executor.submit(self.process_pool_main, operation, handler.get_class_name(),
handler.get_module_name(), handler.get_properties(), unit,
self.process_pool_lock)
self.queue_future(future=future, unit=unit)
self.logger.debug(f"Handler operation {operation} scheduled for Resource Type: {unit.get_resource_type()} "
f"Unit: {unit.get_id()} Reservation: {unit.get_reservation_id()}")
except Exception as e:
self.logger.error(f"Exception occurred {e}")
self.logger.error(traceback.format_exc())
result = {Constants.PROPERTY_TARGET_NAME: operation,
Constants.PROPERTY_TARGET_RESULT_CODE: Constants.RESULT_CODE_EXCEPTION,
Constants.PROPERTY_ACTION_SEQUENCE_NUMBER: 0}
self.handler_complete(unit=unit, properties=result, old_unit=unit)
finally:
self.logger.info(f"Executing {operation} completed")
def create(self, unit: ConfigToken):
self.invoke_handler(unit=unit, operation=Constants.TARGET_CREATE)
def modify(self, unit: ConfigToken):
self.invoke_handler(unit=unit, operation=Constants.TARGET_MODIFY)
def delete(self, unit: ConfigToken):
self.invoke_handler(unit=unit, operation=Constants.TARGET_DELETE)
def handler_complete(self, properties: dict, unit: ConfigToken, old_unit: ConfigToken):
try:
self.lock.acquire()
self.logger.debug(f"Properties: {properties} Unit: {unit}")
            # Copy the sliver from the processed unit back to the original unit before notifying the plugin
old_unit.update_sliver(sliver=unit.get_sliver())
self.plugin.configuration_complete(token=old_unit, properties=properties)
except Exception as e:
self.logger.error(f"Exception occurred {e}")
self.logger.error(traceback.format_exc())
finally:
self.lock.release()
def process(self, future: concurrent.futures.Future, old_unit: ConfigToken):
try:
self.logger.debug(f"Handler Execution completed Result: {future.result()}")
if future.exception() is not None:
self.logger.error(f"Exception occurred while executing the handler: {future.exception()}")
properties, unit = future.result()
self.handler_complete(properties=properties, unit=unit, old_unit=old_unit)
except Exception as e:
self.logger.error(f"Exception occurred {e}")
self.logger.error(traceback.format_exc())
def process_futures(self):
while True:
done = []
with self.future_lock:
while len(self.futures) == 0 and not self.stopped:
try:
self.future_lock.wait()
except InterruptedError as e:
self.logger.info("Future Processor thread interrupted. Exiting")
return
if self.stopped:
self.logger.info("Future Processor exiting")
return
if len(self.futures) > 0:
try:
for f, u in self.futures:
if f.done():
done.append((f, u))
for x in done:
self.futures.remove(x)
except Exception as e:
self.logger.error(f"Error while adding future to future queue! e: {e}")
self.logger.error(traceback.format_exc())
self.future_lock.notify_all()
if len(done) > 0:
self.logger.debug(f"Processing {len(done)} futures")
for f, u in done:
try:
self.process(future=f, old_unit=u)
except Exception as e:
self.logger.error(f"Error while processing event {type(f)}, {e}")
self.logger.error(traceback.format_exc())
done.clear()
time.sleep(5)
@staticmethod
def process_pool_main(operation: str, handler_class: str, handler_module: str, properties: dict,
unit: ConfigToken, process_lock: multiprocessing.Lock):
global process_pool_logger
handler_class = ReflectionUtils.create_instance_with_params(module_name=handler_module,
class_name=handler_class)
handler_obj = handler_class(process_pool_logger, properties, process_lock)
if operation == Constants.TARGET_CREATE:
return handler_obj.create(unit)
elif operation == Constants.TARGET_DELETE:
return handler_obj.delete(unit)
elif operation == Constants.TARGET_MODIFY:
return handler_obj.modify(unit)
else:
process_pool_logger.error("Invalid operation")
result = {Constants.PROPERTY_TARGET_NAME: operation,
Constants.PROPERTY_TARGET_RESULT_CODE: Constants.RESULT_CODE_EXCEPTION,
Constants.PROPERTY_ACTION_SEQUENCE_NUMBER: 0}
return result, unit
@staticmethod
def process_pool_initializer(log_dir: str, log_file: str, log_level, log_retain: int, log_size: int,
logger: str):
global process_pool_logger
if process_pool_logger is None:
log_file = f"{os.getpid()}-{log_file}"
process_pool_logger = LogHelper.make_logger(log_dir=log_dir, log_file=log_file, log_level=log_level,
log_retain=log_retain, log_size=log_size, logger=logger)
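# Illustrative sketch (not part of the original module): a minimal demonstration of the pattern
# used by __setup_process_pool/process_pool_initializer above, i.e. a ProcessPoolExecutor whose
# initializer configures a per-process global logger once, so work submitted to the pool can log
# without pickling logger objects across process boundaries.
def _sketch_pool_initializer(log_level):
    global process_pool_logger
    process_pool_logger = logging.getLogger(f"sketch-handler-{os.getpid()}")
    process_pool_logger.setLevel(log_level)
def _sketch_pool_task(value: int):
    process_pool_logger.debug("worker %s squaring %s", os.getpid(), value)
    return value * value
def _run_pool_sketch():
    with concurrent.futures.ProcessPoolExecutor(max_workers=2,
                                                initializer=_sketch_pool_initializer,
                                                initargs=(logging.DEBUG,)) as pool:
        return list(pool.map(_sketch_pool_task, range(4)))  # -> [0, 1, 4, 9]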
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import platform
import ssl
import sys
import uuid
from functools import reduce
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent
from azure.cli.core.profiles import ResourceType, get_sdk
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_VERSION_DEFAULT, RUNTIME_STACKS)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None):
SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_VERSION_DEFAULT
location = plan_info.location
    # Preserve the existing app settings when 'create' is run against a webapp name that already exists.
name_validation = client.check_name_availability(name, 'Site')
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'az webapp list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
    return len([x for x in opts if x]) == 1  # only one of these options may be specified
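# Illustrative examples (added for clarity, not part of the original source): exactly one of
# runtime, container image, or the multicontainer pair may be supplied, and the multicontainer
# type/file options must be given together. The argument values below are arbitrary placeholders.
def _validate_container_app_create_options_examples():
    assert validate_container_app_create_options(runtime='python|3.8')
    assert validate_container_app_create_options(deployment_container_image_name='nginx')
    assert validate_container_app_create_options(multicontainer_config_type='COMPOSE',
                                                 multicontainer_config_file='docker-compose.yml')
    assert not validate_container_app_create_options(multicontainer_config_type='COMPOSE')
    assert not validate_container_app_create_options(runtime='python|3.8',
                                                     deployment_container_image_name='nginx')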
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
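# Illustrative examples (added for clarity, not part of the original source): a registry URL is
# returned only when the segment before the last '/' looks like a host (contains '.' or ':');
# plain Docker Hub references yield None.
def _parse_docker_image_name_examples():
    assert parse_docker_image_name('myregistry.azurecr.io/myimage:latest') == 'myregistry.azurecr.io'
    assert parse_docker_image_name('localhost:5000/myimage') == 'localhost:5000'
    assert parse_docker_image_name('library/nginx') is None
    assert parse_docker_image_name('nginx') is None
    assert parse_docker_image_name(None) is None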
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
# Mark each setting as the slot setting
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
    # Retry fetching the plan: when the plan is created as part of the function app,
    # it can take a few attempts before it becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
        filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ce:
        # The SDK raises even when the call succeeds with status code 200; only re-raise other errors
if ce.status_code != 200:
raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(cmd, src_plan_info) or is_plan_elastic_premium(cmd, src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(cmd, dest_plan_instance) or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
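# Illustrative examples (added for clarity, not part of the original source): with no identities
# the result defaults to a system-assigned identity; a user-assigned identity resource id (the
# value below is a made-up placeholder) yields a 'UserAssigned' entry keyed by that id.
def _build_identities_info_examples():
    _, types, external, has_system = _build_identities_info(None)
    assert (types, external, has_system) == ('SystemAssigned', [], True)
    fake_id = ('/subscriptions/000/resourceGroups/rg/providers/'
               'Microsoft.ManagedIdentity/userAssignedIdentities/demo-identity')
    _, types, external, has_system = _build_identities_info([fake_id])
    assert types == 'UserAssigned' and external == [fake_id] and not has_system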
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
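# Illustrative examples (added for clarity, not part of the original source): either a '~N'
# shorthand or a full 'major.minor.patch' version string is accepted.
def _is_auth_runtime_version_valid_examples():
    assert is_auth_runtime_version_valid(None)        # not specified -> accepted
    assert is_auth_runtime_version_valid('~2')
    assert is_auth_runtime_version_valid('1.4.0')
    assert not is_auth_runtime_version_valid('~beta')
    assert not is_auth_runtime_version_valid('1.4')   # all three components are required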
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecated method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
# API Version 2019-08-01 (latest as of writing this code) does not return slot instances, however 2018-02-01 does
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot,
api_version="2018-02-01")
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
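# The should_contain check above relies on a small dict trick: updating a copy of the current
# settings with the expected key/value pairs leaves it unchanged only if every expected pair is
# already present with the exact value. A self-contained illustration on plain dicts (added for
# clarity, not part of the original source):
def _should_contain_check_example():
    scm_settings = {'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true', 'OTHER': '1'}
    merged = scm_settings.copy()
    merged.update({'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'})
    assert merged == scm_settings   # expected pair already present -> validation passes
    merged.update({'SCM_DO_BUILD_DURING_DEPLOYMENT': 'false'})
    assert merged != scm_settings   # differing value -> validation fails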
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
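# Illustrative examples (added for clarity, not part of the original source): bare image names
# are prefixed with 'DOCKER|', already-prefixed values pass through, and a multicontainer config
# type replaces the 'DOCKER' prefix.
def _format_fx_version_examples():
    assert _format_fx_version('nginx') == 'DOCKER|nginx'
    assert _format_fx_version('DOCKER|nginx') == 'DOCKER|nginx'
    assert _format_fx_version('<base64-config>', container_config_type='COMPOSE') == 'COMPOSE|<base64-config>'
    assert _format_fx_version('   ') == ' '  # whitespace-only input collapses to a single space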
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
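# Illustrative examples (added for clarity, not part of the original source): scheme, host and a
# non-empty path are all required, so a bare domain is rejected.
def _url_validator_examples():
    assert url_validator('https://example.com/docker-compose.yml')
    assert not url_validator('https://example.com')   # no path component
    assert not url_validator('docker-compose.yml')    # no scheme or host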
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Base64-encode the config file contents and return them as a UTF-8 string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
# note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecated method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
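# Illustrative note (not part of the original source): the helper above produces entries shaped like
#     [{'name': 'WEBSITE_NODE_DEFAULT_VERSION', 'value': '10.14', 'slotSetting': False}, ...]
# (hypothetical setting), with any key listed in APPSETTINGS_TO_MASK reported with a value of None.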
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
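    # Each entry is expected in NAME=VALUE form and is split on the first '='. A hypothetical
    # example (not part of the original source): "MyDb='Server=tcp:myserver;Database=mydb'",
    # where the surrounding quotes are stripped below.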
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
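            # e.g. a hypothetical server URL 'https://myregistry.azurecr.io' yields the registry
            # name 'myregistry'; a scheme-less 'myregistry.azurecr.io' parses to the same name.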
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Both --multicontainer-config-file FILE and --multicontainer-config-type TYPE must be specified together')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
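# Illustrative note (not part of the original source): when given the raw app-settings dictionary,
# the masking above keeps the key but nulls the value, e.g.
#     {'DOCKER_REGISTRY_SERVER_PASSWORD': 'secret'}  ->  {'DOCKER_REGISTRY_SERVER_PASSWORD': None}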
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
HostNameBinding = cmd.get_models('HostNameBinding')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
webapp_list = None if test is None else list_webapp(resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters have no effect when cd_project_url is None: '
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, '
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (anything other than 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
SiteConfigResource = cmd.get_models('SiteConfigResource')
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise CLIError('Windows containers are not yet supported in an App Service Environment')
ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
ase_def = HostingEnvironmentProfile(id=ase_id)
ase_list = client.app_service_environments.list()
ase_found = False
for ase in ase_list:
if ase.id.lower() == ase_id.lower():
location = ase.location
ase_found = True
break
if not ase_found:
raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
DefaultErrorResponseException, BackupSchedule, BackupRequest = cmd.get_models(
'DefaultErrorResponseException', 'BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
    if frequency_num <= 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
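# Illustrative examples (not part of the original source) of the parsing above:
#     _parse_frequency(cmd, '7d')  -> (7, FrequencyUnit.day)
#     _parse_frequency(cmd, '12h') -> (12, FrequencyUnit.hour)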
def _get_location_from_resource_group(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
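# Illustrative note (not part of the original source): for a hypothetical publishing user
# 'deployuser' and an app named 'my-app', the URL built above would typically look like
#     https://deployuser@my-app.scm.azurewebsites.net/my-app.git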
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config is currently not in use; it will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, retention lasts 3 days. Yes, we hard-code it; the portal does too.
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
    # reset the pending swap: restore the configuration of the target slot (or production)
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for stdout, which may not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() in ('FREE', 'SHARED'):
raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
    # the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
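# Illustrative example (not part of the original source): a certificate issued for
# ['*.contoso.com'] (hypothetical domain) matched against ['admin.contoso.com', 'contoso.com']
# yields {'admin.contoso.com'}; the bare apex name does not share the wildcard's '.contoso.com' suffix.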
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
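    # Illustrative note (not part of the original source): resolve('node|10.14') (hypothetical
    # version) would return the matching stack dictionary loaded from the hardcoded RUNTIME_STACKS
    # file, or None when no display name matches case-insensitively.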
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
# get all stack version except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
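            # e.g. this can yield display names of the form 'java|1.8|Tomcat|9.0'
            # (illustrative versions; the real values come from the stacks API response)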
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
"be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
functions_version = '2'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
KEYS = FUNCTIONS_STACKS_API_KEYS()
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
if deployment_container_image_name is None:
site_config.linux_fx_version = site_config_json[KEYS.LINUX_FX_VERSION]
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# adding appsetting to site to make it a function
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
if disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
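    # among the versions flagged as default, the numerically highest one wins; e.g. a
    # hypothetical pair of default versions 2.2 and 3.1 would resolve to the 3.1 entry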
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
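    # e.g. 'linuxFxVersion' -> 'linux_fx_version' (illustrative key; intended for simple camelCase names)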
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
import re
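    # reduces a runtime version string to a comparable float; e.g. values such as '~3' or
    # 'python|3.8' would be expected to yield 3.0 / 3.8 (example values only; the accepted
    # formats depend on the regex constants defined elsewhere in this module)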
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
return float(version_string)
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We emit this success message as a warning so it does not interfere with regular JSON output in stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
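    # e.g. 'DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=mystorage;AccountKey=<key>'
    # (illustrative suffix and account name; the real suffix comes from the active cloud profile)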
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
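    # the loop below polls every 2 seconds, so the default of 450 trials is roughly a 15 minute wait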
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
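        # Kudu reports deployment status 3 for a failed deployment and 4 for a successful one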
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log deployment show
-n {} -g {}""".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the key type input is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
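        # e.g. a name like '0123abcd-..._myvnet' is shortened to 'myvnet'
        # (hypothetical GUID prefix; everything up to the first underscore is dropped)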
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=subnetObj)
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
    # reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements,
launch_browser=False, html=False):
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = detect_os_form_src(src_dir, html)
lang_details = get_lang_from_content(src_dir, html)
language = lang_details.get('language')
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. "
"Please create a new app to continue this operation.".format(name, current_os, src_dir, os))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_is_linux = os_name.lower() == 'linux'
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd=cmd,
user=user,
os_name=os_name,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
# we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
# updated we update those
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
else: # for existing app if we might need to update the stack runtime settings
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
# Remove the file after deployment, handling exception if user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
        while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
        while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on Linux and macOS')
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise CLIError('remote debugging is enabled, please disable')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and hostname_binding.host_name_type == 'Verified':
verified_hostname_found = True
return verified_hostname_found
|
arduino_uploader.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# 1. Copyright
# 2. License
# 3. Author
"""
Upload compiled sketches to an Arduino board, reusing the build parameters produced by the compiler.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import threading
import time
from . import base
from . import arduino_compiler
from . import arduino_target_params
class Uploader(object):
def __init__(self, path, console=None):
self.message_queue = base.message_queue.MessageQueue(console)
self.compiler = arduino_compiler.Compiler(path, console)
self.error_occured = False
self.params = {}
self.do_touch = False
self.wait_for_upload_port = False
def upload(self, using_programmer=False):
self.compiler.build()
self.message_queue.start_print()
upload_thread = threading.Thread(
target=lambda: self.start_upload(using_programmer))
upload_thread.start()
def start_upload(self, using_programmer):
while not self.compiler.is_finished():
time.sleep(1)
if not self.compiler.has_error():
            self.message_queue.put('[Stino - Start uploading...]\n')
self.params = self.compiler.get_params()
self.prepare_upload_port(using_programmer)
self.prepare_cmds(using_programmer)
self.exec_cmds()
if not self.error_occured:
self.retouch_serial_port()
                self.message_queue.put('[Stino - Done uploading.]\n')
time.sleep(20)
self.message_queue.stop_print()
def prepare_upload_port(self, using_programmer):
settings = base.settings.get_arduino_settings()
self.upload_port = settings.get('serial_port', 'no_serial')
self.params['serial.port'] = self.upload_port
if self.upload_port.startswith('/dev/'):
self.upload_port_file = self.upload_port[5:]
else:
self.upload_port_file = self.upload_port
self.params['serial.port.file'] = self.upload_port_file
if self.upload_port in base.serial_monitor.serials_in_use:
serial_monitor = base.serial_monitor.serial_monitor_dict.get(
self.upload_port, None)
if serial_monitor:
serial_monitor.stop()
if not by_using_programmer(using_programmer, self.params):
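            # Caterina-style bootloaders (e.g. Leonardo-class boards) enter bootloader mode when
            # the serial port is opened and closed at 1200 bps, so such boards are flagged here
            # for the touch/wait sequence performed further down.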
bootloader_file = self.params.get('bootloader.file', '')
if 'caterina' in bootloader_file.lower():
self.do_touch = True
self.wait_for_upload_port = True
elif self.params.get('upload.use_1200bps_touch') == 'true':
self.do_touch = True
if self.params.get('upload.wait_for_upload_port') == 'true':
self.wait_for_upload_port = True
if self.do_touch:
before_ports = base.serial_port.list_serial_ports()
if self.upload_port in before_ports:
text = 'Forcing reset using 1200bps open/close '
                text += 'on port {0}.\n'
self.message_queue.put(text, self.upload_port)
base.serial_port.touch_port(self.upload_port, 1200)
if self.wait_for_upload_port:
if base.sys_info.get_os_name() != 'osx':
time.sleep(0.4)
self.upload_port = base.serial_port.wait_for_port(
self.upload_port, before_ports, self.message_queue)
else:
time.sleep(4)
self.params['serial.port'] = self.upload_port
if self.upload_port.startswith('/dev/'):
self.upload_port_file = self.upload_port[5:]
else:
self.upload_port_file = self.upload_port
self.params['serial.port.file'] = self.upload_port_file
if self.params.get('upload.auto_reset', '') == 'true':
            text = 'Resetting to bootloader via DTR pulse\n'
self.message_queue.put(text)
base.serial_port.auto_reset(self.upload_port)
self.params = arduino_target_params.replace_param_values(self.params)
def prepare_cmds(self, using_programmer):
self.cmds = []
if not by_using_programmer(using_programmer, self.params):
if 'post_compile.pattern' in self.params:
self.cmds.append(self.params.get('post_compile.pattern', ''))
self.cmds.append(self.params.get('upload.pattern'))
else:
self.cmds.append(self.params.get('program.pattern', ''))
settings = base.settings.get_arduino_settings()
verify_code = settings.get('verify_code', False)
if verify_code:
self.cmds[-1] = self.cmds[-1] + ' -V'
def exec_cmds(self):
settings = base.settings.get_arduino_settings()
show_upload_output = settings.get('upload_verbose', False)
working_dir = self.compiler.get_ide_path()
self.error_occured = arduino_compiler.exec_cmds(
working_dir, self.cmds, self.message_queue, show_upload_output)
def retouch_serial_port(self):
if self.do_touch:
if self.wait_for_upload_port:
time.sleep(0.1)
timeout = time.time() + 2
while timeout > time.time():
ports = base.serial_port.list_serial_ports()
if self.upload_port in ports:
base.serial_port.touch_port(self.upload_port, 9600)
break
time.sleep(0.25)
else:
base.serial_port.touch_port(self.upload_port, 9600)
def by_using_programmer(using_programmer, params):
state = False
upload_protocol = params.get('upload.protocol', '')
upload_uploader = params.get('upload.uploader', '')
if (using_programmer or upload_protocol is None) and \
upload_uploader != 'dfu-util':
state = True
return state
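# --- Editor's sketch (added example; not part of the original uploader module) ---
# A minimal illustration of how by_using_programmer() above picks the upload
# path. Because params.get('upload.protocol', '') defaults to '' rather than
# None, the "upload_protocol is None" branch never fires here, so the result
# depends only on the using_programmer flag and on the uploader not being
# 'dfu-util'. The parameter values below are illustrative assumptions, not
# taken from a real board definition.
if __name__ == '__main__':
    _demo_params = {'upload.protocol': 'arduino', 'upload.uploader': 'avrdude'}
    print(by_using_programmer(False, _demo_params))                    # False: plain serial upload
    print(by_using_programmer(True, _demo_params))                     # True: upload via programmer
    print(by_using_programmer(True, {'upload.uploader': 'dfu-util'}))  # False: dfu-util is excluded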
|
TServer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from six.moves import queue
import os
import sys
import threading
import traceback
import logging
logger = logging.getLogger(__name__)
from thriftpy3.Thrift import TProcessor
from thriftpy3.protocol import TBinaryProtocol
from thriftpy3.transport import TTransport
class TServer:
"""Base interface for a server, which must have a serve() method.
Three constructors for all servers:
1) (processor, serverTransport)
2) (processor, serverTransport, transportFactory, protocolFactory)
3) (processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
"""
def __init__(self, *args):
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
self.processor = processor
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
def serve(self):
pass
class TSimpleServer(TServer):
"""Simple single-threaded server that just pumps around one transport."""
def __init__(self, *args):
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException as tx:
pass
except Exception as x:
logger.exception(x)
itrans.close()
otrans.close()
class TThreadedServer(TServer):
"""Threaded server that spawns a new thread per each connection."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.daemon = kwargs.get("daemon", False)
def serve(self):
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
t = threading.Thread(target=self.handle, args=(client,))
t.setDaemon(self.daemon)
t.start()
except KeyboardInterrupt:
raise
except Exception as x:
logger.exception(x)
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException as tx:
pass
except Exception as x:
logger.exception(x)
itrans.close()
otrans.close()
class TThreadPoolServer(TServer):
"""Server with a fixed size pool of threads which service requests."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.clients = queue.Queue()
self.threads = 10
self.daemon = kwargs.get("daemon", False)
def setNumThreads(self, num):
"""Set the number of worker threads that should be created"""
self.threads = num
def serveThread(self):
"""Loop around getting clients from the shared queue and process them."""
while True:
try:
client = self.clients.get()
self.serveClient(client)
except Exception as x:
logger.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException as tx:
pass
except Exception as x:
logger.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start a fixed number of worker threads and put client into a queue"""
for i in range(self.threads):
try:
t = threading.Thread(target=self.serveThread)
t.setDaemon(self.daemon)
t.start()
except Exception as x:
logger.exception(x)
# Pump the socket for clients
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
self.clients.put(client)
except Exception as x:
logger.exception(x)
class TForkingServer(TServer):
"""A Thrift server that forks a new process for each request
This is more scalable than the threaded server as it does not cause
GIL contention.
Note that this has different semantics from the threading server.
Specifically, updates to shared variables will no longer be shared.
It will also not work on windows.
This code is heavily inspired by SocketServer.ForkingMixIn in the
Python stdlib.
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.children = []
def serve(self):
def try_close(file):
try:
file.close()
except IOError as e:
logger.warning(e, exc_info=True)
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
try:
pid = os.fork()
if pid: # parent
# add before collect, otherwise you race w/ waitpid
self.children.append(pid)
self.collect_children()
# Parent must close socket or the connection may not get
# closed promptly
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
try_close(itrans)
try_close(otrans)
else:
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
ecode = 0
try:
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as e:
logger.exception(e)
ecode = 1
finally:
try_close(itrans)
try_close(otrans)
os._exit(ecode)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
def collect_children(self):
while self.children:
try:
pid, status = os.waitpid(0, os.WNOHANG)
except os.error:
pid = None
if pid:
self.children.remove(pid)
else:
break
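# --- Editor's sketch (added usage example; not part of the Apache Thrift original) ---
# A minimal illustration of constructor form 2) documented on TServer above:
# (processor, serverTransport, transportFactory, protocolFactory). To stay
# self-contained it uses a bare TProcessor and a do-nothing server transport
# stub; a real deployment would pass a generated <Service>.Processor and a
# socket server transport (TSocket.TServerSocket in the stock Apache Thrift
# layout) -- those pieces are assumptions and are not defined in this file.
if __name__ == '__main__':
    class _DummyServerTransport(object):
        """Stand-in transport so the example constructs without opening sockets."""
        def listen(self):
            pass
        def accept(self):
            return None
    processor = TProcessor()  # placeholder only; a real service processor goes here
    server = TThreadPoolServer(processor,
                               _DummyServerTransport(),
                               TTransport.TTransportFactoryBase(),
                               TBinaryProtocol.TBinaryProtocolFactory())
    server.setNumThreads(4)
    # server.serve()  # would block and dispatch clients given a real server transport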
|
Ui_SimavlinkUi.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Administrator.2013-20160524CL\Desktop\Simavlink\SimavlinkUi.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from src import freqSerial
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QColor
from mavlink import *
from Mythread import *
import time
import threading
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(727, 523)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setMinimumSize(QtCore.QSize(727, 523))
Dialog.setMaximumSize(QtCore.QSize(727, 523))
Dialog.setSizeGripEnabled(True)
self.line = QtWidgets.QFrame(Dialog)
self.line.setGeometry(QtCore.QRect(10, 10, 480, 10))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(Dialog)
self.line_2.setGeometry(QtCore.QRect(5, 15, 10, 230))
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.line_3 = QtWidgets.QFrame(Dialog)
self.line_3.setGeometry(QtCore.QRect(10, 240, 480, 10))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.line_4 = QtWidgets.QFrame(Dialog)
self.line_4.setGeometry(QtCore.QRect(485, 15, 10, 230))
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.line_5 = QtWidgets.QFrame(Dialog)
self.line_5.setGeometry(QtCore.QRect(500, 15, 10, 230))
self.line_5.setFrameShape(QtWidgets.QFrame.VLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.line_6 = QtWidgets.QFrame(Dialog)
self.line_6.setGeometry(QtCore.QRect(505, 10, 210, 10))
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.line_7 = QtWidgets.QFrame(Dialog)
self.line_7.setGeometry(QtCore.QRect(505, 240, 210, 10))
self.line_7.setFrameShape(QtWidgets.QFrame.HLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.line_8 = QtWidgets.QFrame(Dialog)
self.line_8.setGeometry(QtCore.QRect(710, 15, 10, 230))
self.line_8.setFrameShape(QtWidgets.QFrame.VLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName("line_8")
self.line_9 = QtWidgets.QFrame(Dialog)
self.line_9.setGeometry(QtCore.QRect(10, 250, 705, 10))
self.line_9.setFrameShape(QtWidgets.QFrame.HLine)
self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_9.setObjectName("line_9")
self.line_10 = QtWidgets.QFrame(Dialog)
self.line_10.setGeometry(QtCore.QRect(5, 255, 10, 260))
self.line_10.setFrameShape(QtWidgets.QFrame.VLine)
self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_10.setObjectName("line_10")
self.line_11 = QtWidgets.QFrame(Dialog)
self.line_11.setGeometry(QtCore.QRect(10, 510, 705, 10))
self.line_11.setFrameShape(QtWidgets.QFrame.HLine)
self.line_11.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_11.setObjectName("line_11")
self.line_12 = QtWidgets.QFrame(Dialog)
self.line_12.setGeometry(QtCore.QRect(710, 255, 10, 260))
self.line_12.setFrameShape(QtWidgets.QFrame.VLine)
self.line_12.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_12.setObjectName("line_12")
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(20, 20, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(20, 260, 131, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(510, 20, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(20, 60, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(20, 100, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.spinBox = QtWidgets.QSpinBox(Dialog)
self.spinBox.setGeometry(QtCore.QRect(220, 60, 71, 31))
self.spinBox.setObjectName("spinBox")
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(340, 60, 141, 71))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.label_6 = QtWidgets.QLabel(Dialog)
self.label_6.setGeometry(QtCore.QRect(20, 150, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(Dialog)
self.label_7.setGeometry(QtCore.QRect(20, 190, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.lineEdit_2 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_2.setGeometry(QtCore.QRect(120, 190, 41, 31))
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_3 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_3.setGeometry(QtCore.QRect(175, 190, 41, 31))
self.lineEdit_3.setText("")
self.lineEdit_3.setObjectName("lineEdit_3")
self.lineEdit_4 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_4.setGeometry(QtCore.QRect(230, 190, 41, 31))
self.lineEdit_4.setObjectName("lineEdit_4")
self.lineEdit_5 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_5.setGeometry(QtCore.QRect(285, 190, 41, 31))
self.lineEdit_5.setObjectName("lineEdit_5")
self.label_8 = QtWidgets.QLabel(Dialog)
self.label_8.setGeometry(QtCore.QRect(165, 190, 16, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(Dialog)
self.label_9.setGeometry(QtCore.QRect(220, 190, 16, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(Dialog)
self.label_10.setGeometry(QtCore.QRect(275, 190, 16, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setGeometry(QtCore.QRect(340, 160, 141, 61))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.pushButton_2.setFont(font)
self.pushButton_2.setObjectName("pushButton_2")
self.label_11 = QtWidgets.QLabel(Dialog)
self.label_11.setGeometry(QtCore.QRect(510, 70, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.lineEdit_6 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_6.setGeometry(QtCore.QRect(610, 70, 71, 31))
self.lineEdit_6.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_6.setObjectName("lineEdit_6")
self.label_12 = QtWidgets.QLabel(Dialog)
self.label_12.setGeometry(QtCore.QRect(510, 110, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_12.setFont(font)
self.label_12.setObjectName("label_12")
self.lineEdit_7 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_7.setGeometry(QtCore.QRect(610, 110, 71, 31))
self.lineEdit_7.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_7.setObjectName("lineEdit_7")
self.label_13 = QtWidgets.QLabel(Dialog)
self.label_13.setGeometry(QtCore.QRect(690, 70, 21, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(Dialog)
self.label_14.setGeometry(QtCore.QRect(690, 110, 21, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.label_14.setFont(font)
self.label_14.setObjectName("label_14")
self.radioButton = QtWidgets.QRadioButton(Dialog)
self.radioButton.setGeometry(QtCore.QRect(510, 150, 111, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.radioButton.setFont(font)
self.radioButton.setObjectName("radioButton")
self.radioButton_1 = QtWidgets.QRadioButton(Dialog)
self.radioButton_1.setGeometry(QtCore.QRect(621, 150, 100, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.radioButton_1.setFont(font)
self.radioButton_1.setObjectName("radioButton_1")
self.pushButton_3 = QtWidgets.QPushButton(Dialog)
self.pushButton_3.setGeometry(QtCore.QRect(510, 190, 201, 51))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.pushButton_3.setFont(font)
self.pushButton_3.setObjectName("pushButton_3")
self.label_15 = QtWidgets.QLabel(Dialog)
self.label_15.setGeometry(QtCore.QRect(20, 350, 61, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(Dialog)
self.label_16.setGeometry(QtCore.QRect(190, 350, 61, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_16.setFont(font)
self.label_16.setObjectName("label_16")
self.comboBox_3 = QtWidgets.QComboBox(Dialog)
self.comboBox_3.setGeometry(QtCore.QRect(120, 100, 171, 31))
self.comboBox_3.setObjectName("comboBox_3")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
self.line_13 = QtWidgets.QFrame(Dialog)
self.line_13.setGeometry(QtCore.QRect(10, 140, 480, 10))
self.line_13.setFrameShape(QtWidgets.QFrame.HLine)
self.line_13.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_13.setObjectName("line_13")
self.label_17 = QtWidgets.QLabel(Dialog)
self.label_17.setGeometry(QtCore.QRect(20, 400, 61, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_17.setFont(font)
self.label_17.setObjectName("label_17")
self.label_18 = QtWidgets.QLabel(Dialog)
self.label_18.setGeometry(QtCore.QRect(190, 400, 61, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_18.setFont(font)
self.label_18.setObjectName("label_18")
self.label_19 = QtWidgets.QLabel(Dialog)
self.label_19.setGeometry(QtCore.QRect(20, 450, 61, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_19.setFont(font)
self.label_19.setObjectName("label_19")
self.label_20 = QtWidgets.QLabel(Dialog)
self.label_20.setGeometry(QtCore.QRect(190, 450, 61, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_20.setFont(font)
self.label_20.setObjectName("label_20")
self.label_sys = QtWidgets.QLabel(Dialog)
self.label_sys.setGeometry(QtCore.QRect(131, 260, 50, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_sys.setFont(font)
self.label_sys.setObjectName("label_sys")
self.lineEdit_sys = QtWidgets.QLineEdit(Dialog)
self.lineEdit_sys.setGeometry(QtCore.QRect(181, 260, 40, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_sys.setFont(font)
self.lineEdit_sys.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lineEdit_sys.setObjectName("lineEdit_sys")
self.label_comp = QtWidgets.QLabel(Dialog)
self.label_comp.setGeometry(QtCore.QRect(226, 260, 70, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_comp.setFont(font)
        self.label_comp.setObjectName("label_comp")
self.lineEdit_comp = QtWidgets.QLineEdit(Dialog)
self.lineEdit_comp.setGeometry(QtCore.QRect(296, 260, 40, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_comp.setFont(font)
self.lineEdit_comp.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.lineEdit_comp.setObjectName("lineEdit_comp")
self.lineEdit_8 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_8.setGeometry(QtCore.QRect(90, 350, 81, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_8.setFont(font)
self.lineEdit_8.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lineEdit_8.setObjectName("lineEdit_8")
self.lineEdit_9 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_9.setGeometry(QtCore.QRect(260, 350, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_9.setFont(font)
self.lineEdit_9.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lineEdit_9.setObjectName("lineEdit_9")
self.lineEdit_10 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_10.setGeometry(QtCore.QRect(90, 400, 81, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_10.setFont(font)
self.lineEdit_10.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lineEdit_10.setObjectName("lineEdit_10")
self.lineEdit_11 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_11.setGeometry(QtCore.QRect(260, 400, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_11.setFont(font)
self.lineEdit_11.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lineEdit_11.setObjectName("lineEdit_11")
self.lineEdit_12 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_12.setGeometry(QtCore.QRect(90, 450, 81, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_12.setFont(font)
self.lineEdit_12.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lineEdit_12.setObjectName("lineEdit_12")
self.lineEdit_13 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_13.setGeometry(QtCore.QRect(260, 450, 91, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lineEdit_13.setFont(font)
self.lineEdit_13.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.lineEdit_13.setObjectName("lineEdit_13")
self.label_21 = QtWidgets.QLabel(Dialog)
self.label_21.setGeometry(QtCore.QRect(20, 300, 51, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_21.setFont(font)
self.label_21.setObjectName("label_21")
self.comboBox_4 = QtWidgets.QComboBox(Dialog)
self.comboBox_4.setGeometry(QtCore.QRect(90, 300, 171, 31))
self.comboBox_4.setObjectName("comboBox_4")
self.comboBox_4.addItem("")
self.comboBox_4.addItem("")
self.comboBox_4.addItem("")
self.line_14 = QtWidgets.QFrame(Dialog)
self.line_14.setGeometry(QtCore.QRect(360, 255, 10, 260))
self.line_14.setFrameShape(QtWidgets.QFrame.VLine)
self.line_14.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_14.setObjectName("line_14")
self.label_22 = QtWidgets.QLabel(Dialog)
self.label_22.setGeometry(QtCore.QRect(380, 260, 171, 31))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_22.setFont(font)
self.label_22.setObjectName("label_22")
self.textBrowser = QtWidgets.QTextBrowser(Dialog)
self.textBrowser.setGeometry(QtCore.QRect(380, 300, 321, 192))
font = QtGui.QFont()
font.setFamily("Arial Black")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.textBrowser.setFont(font)
self.textBrowser.setObjectName("textBrowser")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.serial0 = None
self.label_serial0connect_true = 0
self.mythread = MyThread()
self.send_flag = False
self.pushButton.clicked.connect(self.serialconnect)
self.pushButton_3.clicked.connect(self.sendbutton)
self.t = None
self.thread_stop = False
self.t_browser = None
self.mavlink_message = ''
self.msgInterval = 0
self.sendNum = 1
self.mavlink_arg0 = 0
self.mavlink_arg1 = 0
self.mavlink_arg2 = 0
self.mavlink_arg3 = 0
self.mavlink_arg4 = 0
self.mavlink_arg5 = 0
self.sendmsg_hex = []
self.sendmsg_num = 0
def serialconnect(self):
if self.label_serial0connect_true != 1:
serialNum = self.spinBox.value()
COM = "COM" + str(serialNum)
baudrate = self.comboBox_3.currentText()
try:
baudrate = int(baudrate)
print("COM:%s,baudrate:%s" % (COM, baudrate))
self.serial0 = freqSerial.connect_serial(COM, baudrate)
except Exception as e:
print(repr(e))
                QMessageBox.warning(None, "警告",
                                    "串口波特率不对", QMessageBox.Yes)
self.serial0 = None
if self.serial0 is not None:
self.label_serial0connect_true = 1
if self.label_serial0connect_true == 0:
QMessageBox.warning(None, "警告", "打开串口失败", QMessageBox.Yes)
else:
self.pushButton.setText("连接成功")
self.t_browser = threading.Thread(target=self.thread_browser)
self.t_browser.start()
else:
self.serial0.close()
self.serial0 = None
self.label_serial0connect_true = 0
self.pushButton.setText("连接")
def mavlinkSet(self):
self.mavlink_message = self.comboBox_4.currentText()
if self.lineEdit_8.text() != '':
self.mavlink_arg0 = int(self.lineEdit_8.text())
if self.lineEdit_9.text() != '':
self.mavlink_arg1 = int(self.lineEdit_9.text())
if self.lineEdit_10.text() != '':
self.mavlink_arg2 = int(self.lineEdit_10.text())
if self.lineEdit_11.text() != '':
self.mavlink_arg3 = int(self.lineEdit_11.text())
if self.lineEdit_12.text() != '':
self.mavlink_arg4 = int(self.lineEdit_12.text())
if self.lineEdit_13.text() != '':
self.mavlink_arg5 = int(self.lineEdit_13.text())
def sendSet(self):
if self.lineEdit_6.text() != '':
self.sendNum = int(self.lineEdit_6.text())
if self.lineEdit_7.text() != '':
self.msgInterval = int(self.lineEdit_7.text())
self.circle_flag = self.radioButton.isChecked()
def mavlinkInit(self, SendCallfunc, flag):
if flag == "send":
fp = open("mavlink_send.log", "wb")
if flag == "recv":
fp = open("mavlink_recv.log", "wb")
self.mavlink = MAVLink(file=fp, srcSystem=1, srcComponent=0)
self.mavlink.set_send_callback(SendCallfunc)
if self.lineEdit_sys.text() != '':
self.mavlink.srcSystem = int(self.lineEdit_sys.text())
if self.lineEdit_comp.text() != '':
self.mavlink.srcComponent = int(self.lineEdit_comp.text())
def mavlinkSend(self, mavlink_message, arg0, arg1, arg2, arg3, arg4, arg5):
if mavlink_message == "heartbeat":
self.mavlink.heartbeat_send(arg0, arg1, arg2, arg3, arg4, arg5)
if mavlink_message == "ping":
tm = time.time()
tm = int(tm * 1000000)
arg0 = tm
print("tm:", tm)
seq = arg1
self.ping_msg = [arg0, seq]
self.mavlink.ping_send(arg0, arg1, arg2, arg3, arg4)
if mavlink_message == "file_transfer_protocol":
data = range(251)
data = list(data)
data = bytes(data)
self.mavlink.file_transfer_protocol_send(arg0, arg1, arg2, data)
def serialcallfunc(self, mavmsg):
byte_array = mavmsg.get_msgbuf()
hex_list = []
head_msg = b'\xFA\xFF\x32\x48'
buf = head_msg + byte_array[1:]
hex_array = list(buf)
for i in hex_array:
byte_hex = hex(i)
if len(byte_hex) == 3:
byte_hex = byte_hex.replace('x', 'x0')
hex_list.append(byte_hex)
print("msg:", hex_list)
self.serial0.write(buf)
def sendbutton(self):
self.SendCallfunc = None
        if self.serial0 is not None:
            self.SendCallfunc = self.serialcallfunc
        if self.SendCallfunc is None:
            QMessageBox.warning(None, "警告", "发送失败, 发送方式没有设置!", QMessageBox.Yes)
            return None
self.mavlinkSet()
self.sendSet()
if self.msgInterval <= 0:
QMessageBox.warning(None, "警告", "发送失败, 报文间隔设置错误!", QMessageBox.Yes)
return None
self.mavlinkInit(self.SendCallfunc, "send")
if self.send_flag == False:
self.t = threading.Thread(target=self.thread_send)
self.t.start()
self.thread_stop = False
self.send_flag = True
self.pushButton_3.setText("正在发送")
else:
print("Thread stop!")
self.thread_stop = True
self.send_flag = False
self.pushButton_3.setText("发送")
def thread_send(self):
if self.circle_flag == True:
while True:
if self.thread_stop == True:
break
self.mavlinkSend(self.mavlink_message, self.mavlink_arg0,
self.mavlink_arg1, self.mavlink_arg2,
self.mavlink_arg3, self.mavlink_arg4,
self.mavlink_arg5)
time.sleep(self.msgInterval / 1000)
print("continue thread send the msg!")
else:
for i in range(self.sendNum):
if self.thread_stop == True:
break
self.mavlinkSend(self.mavlink_message, self.mavlink_arg0,
self.mavlink_arg1, self.mavlink_arg2,
self.mavlink_arg3, self.mavlink_arg4,
self.mavlink_arg5)
self.sendmsg_num = i + 1
print("thread send the msg:", i)
#self.textBrowser.append("thread send the msg:%d" % i)
time.sleep(self.msgInterval / 1000)
self.thread_stop = False
self.send_flag = False
self.pushButton_3.setText("发送")
def thread_browser(self):
text = ""
ping_flag = 0
delay_tm = 0
recv_msg = 0
list_hex = []
while True:
if self.serial0 == None or self.label_serial0connect_true == 0:
break
if self.serial0.isOpen():
try:
n = self.serial0.inWaiting()
byte_char = self.serial0.read(n)
#text = str(byte_char, encoding="utf-8")
list_hex = list(byte_char)
if self.mavlink_message != "ping" and self.radioButton_1.isChecked() and list_hex != []:
recv_msg += 1
if self.mavlink_message == "ping" and list_hex != []:
if list_hex[-4] == 0:
self.mavlinkInit(self.SendCallfunc, "recv")
tm = time.time()
tm = int(tm * 1000000)
arg0 = tm
arg1 = 1
self.mavlinkSend("ping", arg0, arg1, 1, 0, 0, 0)
if list_hex[-4] == 1:
ping_flag = 1
recv_tm = time.time()
recv_tm = int(recv_tm * 1000000)
delay_tm = recv_tm - self.ping_msg[0]
for i in list_hex:
byte_hex = hex(i)
if len(byte_hex) == 3:
byte_hex = byte_hex.replace('x', 'x0')
text += byte_hex + " "
except Exception as e:
print(repr(e))
break
if text != "":
if ping_flag == 1:
self.textBrowser.append(
"接收报文: 类型-ping 延时- %d us" % delay_tm)
else:
self.textBrowser.append("接收报文:")
text += "\n"
self.textBrowser.append(text)
if ping_flag != 1:
self.textBrowser.append(
"===================================")
self.textBrowser.append("环回报告:")
self.textBrowser.append("发送报文 - %d" % self.sendmsg_num)
self.textBrowser.append("接收报文 - %d" % recv_msg)
self.textBrowser.append(
"===================================\n")
# time.sleep(0.1)
text = ""
self.sendmsg_num = 0
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Simavlink_v1.7"))
self.label.setText(_translate("Dialog", "串口配置"))
self.label_2.setText(_translate("Dialog", "Mavlink配置"))
self.label_3.setText(_translate("Dialog", "发送配置"))
self.label_4.setText(_translate("Dialog", "串口号:"))
self.label_5.setText(_translate("Dialog", "波特率:"))
self.pushButton.setText(_translate("Dialog", "连接"))
self.label_6.setText(_translate("Dialog", "网口配置"))
self.label_7.setText(_translate("Dialog", "网口地址:"))
self.label_8.setText(_translate("Dialog", "."))
self.label_9.setText(_translate("Dialog", "."))
self.label_10.setText(_translate("Dialog", "."))
self.pushButton_2.setText(_translate("Dialog", "测试"))
self.label_11.setText(_translate("Dialog", "报文个数:"))
self.label_12.setText(_translate("Dialog", "报文间隔:"))
self.label_13.setText(_translate("Dialog", "个"))
self.label_14.setText(_translate("Dialog", "ms"))
self.radioButton.setText(_translate("Dialog", "连续发包"))
self.radioButton_1.setText(_translate("Dialog", "环回检查"))
self.pushButton_3.setText(_translate("Dialog", "发送"))
self.label_15.setText(_translate("Dialog", "arg_0:"))
self.label_16.setText(_translate("Dialog", "arg_1:"))
self.comboBox_3.setItemText(0, _translate("Dialog", "9600"))
self.comboBox_3.setItemText(1, _translate("Dialog", "38400"))
self.comboBox_3.setItemText(2, _translate("Dialog", "57600"))
self.comboBox_3.setItemText(3, _translate("Dialog", "115200"))
self.label_17.setText(_translate("Dialog", "arg_2:"))
self.label_18.setText(_translate("Dialog", "arg_3:"))
self.label_19.setText(_translate("Dialog", "arg_4:"))
self.label_20.setText(_translate("Dialog", "arg_5:"))
self.label_21.setText(_translate("Dialog", "报文:"))
self.comboBox_4.setItemText(0, _translate("Dialog", "heartbeat"))
self.comboBox_4.setItemText(1, _translate("Dialog", "ping"))
self.comboBox_4.setItemText(2, _translate(
"Dialog", "file_transfer_protocol"))
self.label_22.setText(_translate("Dialog", "Mavlink报文接收"))
self.label_sys.setText(_translate("Dialog", "SysID:"))
self.label_comp.setText(_translate("Dialog", "CompID:"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
DataProcess.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 30 14:40:05 2020
Goal: After getting the api_response from the trigger_api module
process the data into a dataframe, and write it into a csv file.
@author: Zhiyue June Wei
"""
import pandas as pd
import threading
from CrunchbaseAPI.trigger_api import trigger_fct
# Global variable (the data frame that store the records)
df_result = []
# Function that will be called in the Thread object (input the id of the page)
def thread_function(page_id, label, since_time, sort_order, name, query, domain_name, locations, types, socials):
print("This is {}-th page reading!".format(page_id))
global df_result
    # Obtain the raw data for the page specified by page_id
total_data = trigger_fct(label, since_time, sort_order, page_id, name, query, domain_name, locations, types, socials)
for data_pt in total_data["data"]["items"]:
# Extract the properties of each satisfied data point
pro_info = data_pt["properties"]
df_result = df_result.append(pro_info, ignore_index = True)
# Method: return the requested number of records (the caller must specify the number explicitly)
def get_data(label, number = 0, since_time = None, sort_order = None, page = 1, name = None, query = None, domain_name = None, locations = None, types = None, socials = None):
global df_result
# Call the trigger_fct function to get the api_response
raw_result = trigger_fct(label, since_time, sort_order, page, name, query, domain_name, locations, types, socials)
# The number of the records that are going to be returned
total_number = raw_result["data"]["paging"]["total_items"]
number_per_page = raw_result["data"]["paging"]["items_per_page"]
# Check the validity of the input number of the results
if number < 0:
print("WARNING: Please input a valid query number (non-negative)!")
return
elif number == 0:
print("WARNING: The number is zero, will return no result!")
return
else:
# Create an empty dataframe for returning the results
df_result = pd.DataFrame(columns = raw_result["data"]["items"][0]["properties"].keys())
# Adjust the input number
if number >= total_number:
number = total_number
# Handling the pages
page_number = number // number_per_page
        last_page = number % number_per_page  # 0 when number is an exact multiple of the page size
#print(page_number, last_page)
# Using multi-threading to handle the page reading
threads = list()
for page in range(page_number):
print("This is the {}-th page".format(page + 1))
x = threading.Thread(target = thread_function, args = (page + 1, label, since_time, sort_order, name, query, domain_name, locations, types, socials))
threads.append(x)
x.start()
for thread_element in threads:
thread_element.join()
# Handle the last_page
last_page_data = trigger_fct(label, since_time, sort_order, page_number + 1, name, query, domain_name, locations, types, socials)
count = 0
for last_item in last_page_data["data"]["items"]:
if count >= last_page:
break
#print(last_item["properties"])
df_result = df_result.append(last_item["properties"], ignore_index = True)
count += 1
return df_result
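# --- Editor's sketch (added example; not part of the original module) ---
# A minimal illustration of how get_data() might be called. The "organizations"
# collection label, the sort order string and the output file name are
# illustrative assumptions about the Crunchbase API wrapper rather than values
# taken from this repository, and a real call also needs valid credentials
# inside CrunchbaseAPI.trigger_api.
if __name__ == "__main__":
    df = get_data("organizations", number = 50, sort_order = "updated_at DESC")
    if df is not None:
        df.to_csv("crunchbase_sample.csv", index = False)
        print("Wrote {} records".format(len(df)))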
|
__main__.py
|
import argparse
from ctypes import ArgumentError
import asyncio
import yaml
from typing import NamedTuple, List
from enum import Enum
import threading
import traceback
import appdirs
from pathlib import Path
import sys
from datetime import datetime
import os
import time
import signal
from . import subprocess_impl
from pyri.util.wait_exit import wait_exit
from pyri.plugins.service_node_launch import get_all_service_node_launches
from pyri.device_manager_client import _DeviceManagerConnectFilter
from RobotRaconteur.Client import *
from RobotRaconteurCompanion.Util.IdentifierUtil import IdentifierUtil
# Based on MS Windows service states
class ProcessState(Enum):
STOPPED = 0x1
START_PENDING = 0x2
STOP_PENDING = 0x3
RUNNING = 0x4
CONTINUE_PENDING = 0x5
PAUSE_PENDING = 0x6
PAUSED = 0x7
#TODO: Don't hard code services to start
# service_node_launch = [
# ServiceNodeLaunch("variable_storage", "pyri.variable_storage",["--db-file=test3.db"],[]),
# ServiceNodeLaunch("device_manager","pyri.device_manager",[],["variable_storage"]),
# ServiceNodeLaunch("devices_states","pyri.devices_states",[],["device_manager"]),
# ServiceNodeLaunch("sandbox","pyri.sandbox", [],["device_manager"]),
# ServiceNodeLaunch("program_master","pyri.program_master",[],["device_manager"]),
# ServiceNodeLaunch("robotics_jog","pyri.robotics.robotics_jog_service",[],["device_manager"]),
# ServiceNodeLaunch("robotics_motion","pyri.robotics.robotics_motion_service",[],["device_manager"]),
# ServiceNodeLaunch("webui_server","pyri.webui_server", ["--device-manager-url=rr+tcp://{{ HOSTNAME }}:59902?service=device_manager"],["device_manager"])
# ]
class PyriProcess:
def __init__(self, parent, service_node_launch, parser_results, log_dir, loop):
self.parent = parent
self.service_node_launch = service_node_launch
self.parser_results = parser_results
self.log_dir = log_dir
self.loop = loop
self._keep_going = True
self._process = None
async def run(self):
s = self.service_node_launch
stdout_log_fname = self.log_dir.joinpath(f"{s.name}.txt")
stderr_log_fname = self.log_dir.joinpath(f"{s.name}.stderr.txt")
with open(stdout_log_fname,"w") as stdout_log, open(stderr_log_fname,"w") as stderr_log:
while self._keep_going:
try:
self.parent.process_state_changed(s.name,ProcessState.START_PENDING)
stderr_log.write(f"Starting process {s.name}...\n")
args = s.prepare_service_args(self.parser_results)
python_exe = sys.executable
self._process = await subprocess_impl.create_subprocess_exec(python_exe,(["-m", s.module_main] + args))
# print(f"process pid: {self._process.pid}")
stderr_log.write(f"Process {s.name} started\n\n")
self.parent.process_state_changed(s.name,ProcessState.RUNNING)
stdout_read_task = asyncio.ensure_future(self._process.stdout.readline())
stderr_read_task = asyncio.ensure_future(self._process.stderr.readline())
while self._keep_going:
wait_tasks = list(filter(lambda x: x is not None, [stdout_read_task, stderr_read_task]))
if len(wait_tasks) == 0:
break
done, pending = await asyncio.wait(wait_tasks,return_when=asyncio.FIRST_COMPLETED)
if stderr_read_task in done:
stderr_line = await stderr_read_task
if len(stderr_line) == 0:
stderr_read_task = None
else:
stderr_log.write(stderr_line.decode("utf-8"))
stderr_log.flush()
stderr_read_task = asyncio.ensure_future(self._process.stderr.readline())
if stdout_read_task in done:
stdout_line = await stdout_read_task
if len(stdout_line) == 0:
stdout_read_task = None
else:
stdout_log.write(stdout_line.decode("utf-8"))
stdout_log.flush()
stdout_read_task = asyncio.ensure_future(self._process.stdout.readline())
await self._process.wait()
self.parent.process_state_changed(s.name,ProcessState.STOPPED)
except:
self._process = None
self.parent.process_state_changed(s.name,ProcessState.STOPPED)
traceback.print_exc()
stderr_log.write(f"\nProcess {s.name} error:\n")
stderr_log.write(traceback.format_exc())
self._process = None
if not s.restart:
break
if self._keep_going:
await asyncio.sleep(s.restart_backoff)
@property
def process_state(self):
pass
@property
def stopped(self):
return self._process == None
def close(self):
self._keep_going = False
if self._process:
self._process.send_term()
def kill(self):
p = self._process
if p is None:
return
try:
self._process.kill()
except:
traceback.print_exc()
class PyriCore:
def __init__(self, device_info, service_node_launches, parser_results, log_dir, loop):
self.device_info = device_info
self.service_node_launches = dict()
self._closed = False
for s in service_node_launches:
self.service_node_launches[s.name] = s
self.log_dir = log_dir
self._loop = loop
self._parser_results = parser_results
self._subprocesses = dict()
self._lock = threading.RLock()
def _do_start(self,s):
p = PyriProcess(self, s, self._parser_results, self.log_dir, self._loop)
self._subprocesses[s.name] = p
self._loop.create_task(p.run())
def start_all(self):
with self._lock:
for name,s in self.service_node_launches.items():
if name not in self._subprocesses:
self._do_start(s)
def start(self, name):
with self._lock:
if self._closed:
assert False, "Already closed"
try:
s = self.service_node_launches[name]
except KeyError:
raise ArgumentError(f"Invalid service requested: {name}")
if name not in self._subprocesses:
self._do_start(s)
def process_state_changed(self, process_name, state):
print(f"Process changed {process_name} {state}")
if self._closed:
if state == ProcessState.STOPPED:
with self._lock:
if process_name in self._subprocesses:
del self._subprocesses[process_name]
def check_deps_status(self, deps):
return True
def close(self):
with self._lock:
if self._closed:
return
self._closed = True
for p in self._subprocesses.values():
try:
p.close()
except Exception:
traceback.print_exc()
pass
self._wait_all_closed()
def _wait_all_closed(self):
try:
t1 = time.time()
t_last_sent_close = 0
while True:
t_diff = time.time() - t1
if t_diff > 15:
break
running_count = 0
with self._lock:
for p in self._subprocesses.values():
if not p.stopped:
running_count += 1
if running_count == 0:
break
time.sleep(0.1)
if t_diff > t_last_sent_close + 1:
t_last_sent_close = t_diff
with self._lock:
for p in self._subprocesses.values():
if not p.stopped:
try:
p.close()
except Exception:
traceback.print_exc()
pass
running_count = 0
with self._lock:
for p in self._subprocesses.values():
if not p.stopped:
running_count += 1
try:
p.kill()
except Exception:
traceback.print_exc()
if running_count != 0:
print("Sending processes still running SIGKILL")
time.sleep(2)
self._loop.stop()
except:
traceback.print_exc()
def add_default_devices(self, delay_seconds=5):
self._loop.create_task(self._do_add_default_devices(delay_seconds))
async def _do_add_default_devices(self, delay_seconds):
default_devices = []
for l in self.service_node_launches.values():
if l.default_devices is not None and len(l.default_devices) > 0:
default_devices.extend(l.default_devices)
if len(default_devices) == 0:
return
filter = _DeviceManagerConnectFilter("pyri_device_manager")
device_manager_sub = RRN.SubscribeServiceByType("tech.pyri.device_manager.DeviceManager", filter.get_filter())
if delay_seconds > 0:
await asyncio.sleep(delay_seconds)
a, f = await self._do_add_default_devices2(default_devices, device_manager_sub)
while f is None or len(f) > 0:
print("Retrying add default devices...")
await asyncio.sleep(5)
a, f = await self._do_add_default_devices2(default_devices, device_manager_sub)
async def _do_add_default_devices2(self, default_devices, device_manager_sub):
res, c = device_manager_sub.TryGetDefaultClient()
if not res:
print("Warning: could not connect to device manager to add default devices")
return None, None
active_devices = await c.async_getf_active_devices(None)
active_device_names = [a.local_device_name for a in active_devices]
ident_util = IdentifierUtil(client_obj = c)
added_devices = []
failed_devices = []
for d in default_devices:
try:
if d[1] not in active_device_names:
d_ident = ident_util.CreateIdentifierFromName(d[0])
await c.async_add_device(d_ident,d[1],[],None)
added_devices.append(d[1])
except Exception as e:
print(f"Warning: could not add default device {d[1]}: {str(e)}")
failed_devices.append(d[1])
if len(added_devices) > 0:
print(f"Added default devices: {added_devices}")
return added_devices, failed_devices
def main():
try:
service_node_launch_dict = get_all_service_node_launches()
service_node_launch = []
for l in service_node_launch_dict.values():
service_node_launch.extend(l)
parser = argparse.ArgumentParser("PyRI Core Launcher")
parser.add_argument("--no-add-default-devices",action='store_true',default=False,help="Don't add default devices")
for l in service_node_launch:
if l.add_arg_parser_options is not None:
l.add_arg_parser_options(parser)
parser_results, _ = parser.parse_known_args()
timestamp = datetime.now().strftime("pyri-core-%Y-%m-%d--%H-%M-%S")
log_dir = Path(appdirs.user_log_dir(appname="pyri-project")).joinpath(timestamp)
log_dir.mkdir(parents=True, exist_ok=True)
def loop_in_thread(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
print("Exited loop!")
loop = asyncio.new_event_loop()
t = threading.Thread(target=loop_in_thread, args=(loop,), daemon=True)
t.start()
core = PyriCore(None, service_node_launch, parser_results, log_dir, loop)
loop.call_soon_threadsafe(lambda: core.start_all())
if not parser_results.no_add_default_devices:
loop.call_soon_threadsafe(lambda: core.add_default_devices())
def ctrl_c_pressed(signum, frame):
loop.call_soon_threadsafe(lambda: core.close())
signal.signal(signal.SIGINT, ctrl_c_pressed)
signal.signal(signal.SIGTERM, ctrl_c_pressed)
#loop.run_forever()
wait_exit()
core.close()
print("Done")
except Exception:
traceback.print_exc()
if __name__ == "__main__":
main()
|
multi_test.py
|
import multiprocessing
from multiprocessing.managers import BaseManager, BaseProxy, NamespaceProxy
from sharedClass import SharedClass
class Test2(object):
def __init__(self, x, y, z):
self.__setattr__('x', x+10)
self.__setattr__('y', y+10)
self.__setattr__('z', z+10)
def __setattr__(self, key, value):
#print(key, 'Set test2')
super(Test2, self).__setattr__(key, value)
class Test(object):
def __init__(self,x,y,z):
self.__setattr__('x', int(x))
self.__setattr__('y', y)
self.__setattr__('z', z)
self.__setattr__('test2', Test2(x,y,z))
def __getattribute__(self, item):
return object.__getattribute__(self, item)
def __setattr__(self, key, value):
#print(key, 'Set test')
super(Test, self).__setattr__(key, value)
def change1(obj,obj2):
#obj.x = 50
obj.set('x', 50)
#obj.test2.x = 60
#obj.y = 100
obj.set('test2.x', 60)
obj2.set('test2.y', 120)
def change2(obj, obj2):
#obj.set('x', 80)
pass
if __name__ == '__main__':
# creating a list in server process memory
"""
sessionManager.register('Test',Test, Test_Proxy)
manager = sessionManager()
manager.start()
"""
shar = SharedClass(Test)
obj = shar(10,20,30)
obj2 = shar(50,60,80)
# new record to be inserted in records
# creating new processes
p1 = multiprocessing.Process(target=change1, args=(obj,obj2))
p2 = multiprocessing.Process(target=change2, args=(obj,obj2))
# running process p1 to insert new record
p1.start()
p1.join()
# running process p2 to print records
p2.start()
p2.join()
print(obj.test2.x, obj.x, obj2.test2.y)
|
Binance Detect Moonings.py
|
"""
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import importlib
# used for directory handling
import glob
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
# for colourful logging to the console
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
# tracks profit/loss each session
global session_profit
session_profit = 0
# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x):
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self):
pass
sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
'''Return the current price for all coins on binance'''
global historical_prices, hsp_head
initial_price = {}
prices = client.get_all_tickers()
for coin in prices:
if CUSTOM_LIST:
if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
else:
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
if add_to_historical:
hsp_head += 1
if hsp_head == RECHECK_INTERVAL:
hsp_head = 0
historical_prices[hsp_head] = initial_price
return initial_price
def wait_for_price():
'''calls the initial price and ensures the correct amount of time has passed
before reading the current price again'''
global historical_prices, hsp_head, volatility_cooloff
volatile_coins = {}
externals = {}
coins_up = 0
coins_down = 0
coins_unchanged = 0
pause_bot()
if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
# sleep for exactly the amount of time required
time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
print(f'Working...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
    # retrieve latest prices
get_price()
# calculate the difference in prices
for coin in historical_prices[hsp_head]:
# minimum and maximum prices over time period
min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
        # each coin that has gained more than CHANGE_IN_PRICE is added to the volatile_coins dict, as long as MAX_COINS has not been reached
if threshold_check > CHANGE_IN_PRICE:
coins_up +=1
if coin not in volatility_cooloff:
volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
# only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
volatility_cooloff[coin] = datetime.now()
if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0:
volatile_coins[coin] = round(threshold_check, 3)
print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}')
else:
print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are holding max number of coins{txcolors.DEFAULT}')
elif threshold_check < CHANGE_IN_PRICE:
coins_down +=1
else:
coins_unchanged +=1
# Disabled until fix
#print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
# Here goes new code for external signalling
externals = external_signals()
exnumber = 0
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and \
(len(coins_bought) + exnumber + len(volatile_coins)) < MAX_COINS:
volatile_coins[excoin] = 1
exnumber +=1
print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')
return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.exs")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
return external_list
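# --- Editor's sketch (added helper; not part of the original bot) ---
# external_signals() above reads one symbol per line from every "signals/*.exs"
# file and deletes the file afterwards, so an external signalling module can
# hand a coin to the bot simply by writing such a file. The file name below is
# an illustrative assumption; the symbols written must match the keys used
# elsewhere in the bot (coin ticker plus PAIR_WITH, e.g. 'BTCUSDT' when
# PAIR_WITH is 'USDT').
def demo_write_external_signal(symbol, filename='signals/custom_module.exs'):
    '''Append one ticker symbol to an external signalling file (illustrative only)'''
    os.makedirs('signals', exist_ok=True)
    with open(filename, 'a') as f:
        f.write(symbol + '\n')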
def pause_bot():
    '''Pause the script when external indicators detect a bearish trend in the market'''
global bot_paused, session_profit, hsp_head
# start counting for how long the bot's been paused
start_time = time.perf_counter()
while os.path.isfile("signals/paused.exc"):
if bot_paused == False:
print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
bot_paused = True
# Sell function needs to work even while paused
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
get_price(True)
# pausing here
if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
else:
# stop counting the pause time
stop_time = time.perf_counter()
time_elapsed = timedelta(seconds=int(stop_time-start_time))
        # resume the bot and set bot_paused back to False
if bot_paused == True:
print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
bot_paused = False
return
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to each coin's volume'''
volatile_coins, number_of_coins, last_price = wait_for_price()
lot_size = {}
volume = {}
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for BTC for example is 6 decimal points
# while XRP is only 1
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0:
lot_size[coin] = 0
except:
pass
# calculate the volume in coin from QUANTITY in USDT (default)
volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
volume[coin] = float('{:.1f}'.format(volume[coin]))
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
return volume, last_price
def buy():
'''Place Buy market orders for each volatile coin found'''
volume, last_price = convert_volume()
orders = {}
for coin in volume:
        # only buy if there are no active trades on the coin
if coin not in coins_bought:
print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
if TEST_MODE:
orders[coin] = [{
'symbol': coin,
'orderId': 0,
'time': datetime.now().timestamp()
}]
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
continue
            # attempt to place a real market order (only reached when TEST_MODE is off)
try:
buy_limit = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
print('Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
print('Order returned, saving order to file')
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
else:
print(f'Signal detected, but there is already an active trade on {coin}')
return orders, last_price, volume
def sell_coins():
'''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
global hsp_head, session_profit
last_price = get_price(False) # don't populate rolling window
#last_price = get_price(add_to_historical=True) # don't populate rolling window
coins_sold = {}
for coin in list(coins_bought):
# define stop loss and take profit
TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['take_profit']) / 100
SL = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['stop_loss']) / 100
LastPrice = float(last_price[coin]['price'])
BuyPrice = float(coins_bought[coin]['bought_at'])
PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
        # if the price is above the take profit and the trailing stop loss is enabled, readjust SL and TP to follow the price
if LastPrice > TP and USE_TRAILING_STOP_LOSS:
            # raise TP to the current gain plus TRAILING_TAKE_PROFIT (the level at which SL/TP are next readjusted)
coins_bought[coin]['take_profit'] = PriceChange + TRAILING_TAKE_PROFIT
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit")
continue
# check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
if (LastPrice < SL or LastPrice > TP) and not USE_TRAILING_STOP_LOSS:
print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}")
# try to create a real order
try:
if not TEST_MODE:
sell_coins_limit = client.create_order(
symbol = coin,
side = 'SELL',
type = 'MARKET',
quantity = coins_bought[coin]['volume']
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if coin has been sold and create a dict for each coin sold
else:
coins_sold[coin] = coins_bought[coin]
# prevent system from buying this coin for the next TIME_DIFFERENCE minutes
volatility_cooloff[coin] = datetime.now()
# Log trade
if LOG_TRADES:
profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume'])* (1-(TRADING_FEE*2)) # adjust for trading fee here
write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange-(TRADING_FEE*2):.2f}%")
session_profit=session_profit + (PriceChange-(TRADING_FEE*2))
continue
        # no action; print the status once every TIME_DIFFERENCE minutes
if hsp_head == 1:
if len(coins_bought) > 0:
print(f'TP or SL not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}{PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}')
if hsp_head == 1 and len(coins_bought) == 0: print(f'Not holding any coins')
return coins_sold
def update_portfolio(orders, last_price, volume):
'''add every coin bought to our portfolio for tracking/selling later'''
if DEBUG: print(orders)
for coin in orders:
coins_bought[coin] = {
'symbol': orders[coin][0]['symbol'],
'orderid': orders[coin][0]['orderId'],
'timestamp': orders[coin][0]['time'],
'bought_at': last_price[coin]['price'],
'volume': volume[coin],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
}
# save the coins in a json file in the same directory
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
def remove_from_portfolio(coins_sold):
'''Remove coins sold due to SL or TP from portfolio'''
for coin in coins_sold:
coins_bought.pop(coin)
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
def write_log(logline):
timestamp = datetime.now().strftime("%d/%m %H:%M:%S")
with open(LOG_FILE,'a+') as f:
f.write(timestamp + ' ' + logline + '\n')
if __name__ == '__main__':
# Load arguments then parse settings
args = parse_args()
mymodule = {}
# set to false at Start
global bot_paused
bot_paused = False
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_config = load_config(config_file)
parsed_creds = load_config(creds_file)
# Default no debugging
DEBUG = False
# Load system vars
TEST_MODE = parsed_config['script_options']['TEST_MODE']
LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
QUANTITY = parsed_config['trading_options']['QUANTITY']
MAX_COINS = parsed_config['trading_options']['MAX_COINS']
FIATS = parsed_config['trading_options']['FIATS']
TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
if DEBUG_SETTING or args.debug:
DEBUG = True
# Load creds for correct environment
access_key, secret_key = load_correct_creds(parsed_creds)
if DEBUG:
print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
print(f'Your credentials have been loaded from {creds_file}')
    # Authenticate with the client and make sure the API key is valid before continuing
if AMERICAN_USER:
client = Client(access_key, secret_key, tld='us')
else:
client = Client(access_key, secret_key)
    # If the user has a bad or incorrect API key,
    # this will stop the script from starting and display a helpful error.
api_ready, msg = test_api_key(client, BinanceAPIException)
if api_ready is not True:
exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')
# Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
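    # TICKERS_LIST is a plain-text file with one ticker symbol per line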
if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]
# try to load all the coins bought by the bot if the file exists and is not empty
coins_bought = {}
# path to the saved coins_bought file
coins_bought_file_path = 'coins_bought.json'
# rolling window of prices; cyclical queue
historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
hsp_head = -1
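    # hsp_head indexes the most recently stored snapshot in the rolling window (-1 means nothing stored yet)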
# prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
volatility_cooloff = {}
# use separate files for testing and live trading
if TEST_MODE:
coins_bought_file_path = 'test_' + coins_bought_file_path
# if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
with open(coins_bought_file_path) as file:
coins_bought = json.load(file)
print('Press Ctrl-Q to stop the script')
if not TEST_MODE:
if not args.notimeout: # if notimeout skip this (fast for dev tests)
print('WARNING: You are using the Mainnet and live funds. Waiting 30 seconds as a security measure')
time.sleep(30)
    # remove any stale external signal files left over from a previous run
    signals = glob.glob("signals/*.exs")
    for filename in signals:
        try:
            os.remove(filename)
        except:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}')
if os.path.isfile("signals/paused.exc"):
try:
os.remove("signals/paused.exc")
except:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove signals/paused.exc{txcolors.DEFAULT}')
# load signalling modules
try:
if len(SIGNALLING_MODULES) > 0:
for module in SIGNALLING_MODULES:
print(f'Starting {module}')
mymodule[module] = importlib.import_module(module)
t = threading.Thread(target=mymodule[module].do_work, args=())
t.daemon = True
t.start()
time.sleep(2)
else:
print(f'No modules to load {SIGNALLING_MODULES}')
except Exception as e:
print(e)
# seed initial prices
get_price()
READ_TIMEOUT_COUNT=0
CONNECTION_ERROR_COUNT = 0
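    # main loop: look for new buys, record them in the portfolio, then check open positions for SL/TP exits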
while True:
try:
orders, last_price, volume = buy()
update_portfolio(orders, last_price, volume)
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
except ReadTimeout as rt:
READ_TIMEOUT_COUNT += 1
            print(f'{txcolors.WARNING}We got a read timeout error from Binance. Going to re-loop. Current Count: {READ_TIMEOUT_COUNT}\n{rt}{txcolors.DEFAULT}')
except ConnectionError as ce:
CONNECTION_ERROR_COUNT +=1
            print(f'{txcolors.WARNING}We got a connection error from Binance. Going to re-loop. Current Count: {CONNECTION_ERROR_COUNT}\n{ce}{txcolors.DEFAULT}')
|
stdbroker.py
|
# coding=utf-8
import json
import threading
from poco.utils.net.transport.ws import WsSocket
from poco.utils.net.transport.tcp import TcpSocket
from poco.utils import six
if six.PY3:
from urllib.parse import urlparse
else:
from urlparse import urlparse
class StdBroker(object):
def __init__(self, ep1, ep2):
super(StdBroker, self).__init__()
        # requests always flow ep2 --> ep1,
        # responses flow back ep1 --> ep2
self.ep1 = self._make_transport(ep1)
self.ep2 = self._make_transport(ep2)
self.requests_map = {} # reqid -> requester cid
self.t = threading.Thread(target=self.loop)
self.t.daemon = True
self.t.start()
def _make_transport(self, ep):
ep = urlparse(ep)
if ep.scheme.startswith('ws'):
transport = WsSocket()
else:
transport = TcpSocket()
transport.bind((ep.hostname, ep.port))
return transport
def deserialize(self, data):
if six.PY3 and not isinstance(data, six.text_type):
data = data.decode('utf-8')
return json.loads(data)
def serialize(self, packet):
return json.dumps(packet)
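    # Requests arriving on ep2 are forwarded to ep1; the request id is remembered so that the
    # matching response from ep1 can be routed back to the originating ep2 connection (cid).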
def handle_request(self):
cid, data = self.ep2.update()
if data:
packet = self.deserialize(data)
reqid = packet['id']
self.requests_map[reqid] = cid
self.ep1.send(None, data)
def handle_response(self):
_, data = self.ep1.update()
if data:
packet = self.deserialize(data)
reqid = packet['id']
cid = self.requests_map.pop(reqid, None)
if cid:
self.ep2.send(cid, data)
def loop(self):
print('StdBroker on.')
while True:
self.handle_request()
self.handle_response()
if __name__ == '__main__':
import sys
import time
if len(sys.argv) < 3:
print('Not enough arguments. Please provide at least 2 endpoints.')
print('e.g. ws://*:15003 tcp://*:15004')
exit(-1)
rpt = StdBroker(sys.argv[1], sys.argv[2])
while True:
time.sleep(5)
|
http.py
|
# -*- coding: utf-8 -*-
import http.server
import socketserver
import rssit.path
import rssit.persistent
import threading
try:
port
except NameError:
port = 0
def do_GET_real(self):
self.protocol_version = "HTTP/1.1"
rssit.path.process(self, self.path)
class handler(http.server.SimpleHTTPRequestHandler):
allow_reuse_address = True
def server_bind(self):
self.socket.setsockopt(self.socket.SOL_SOCKET,
self.socket.SO_REUSEADDR, 1)
self.socket.setsockopt(self.socket.SOL_SOCKET,
self.socket.SO_REUSEPORT, 1)
self.socket.bind(self.server_address)
def do_GET(self):
self.protocol_version = "HTTP/1.1"
rssit.path.process(self, self.path)
#do_GET_real(self)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def serve(wanted_port):
global port
port = wanted_port
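    # keep trying successive ports until one binds; errno 98 (EADDRINUSE on Linux) means the port is taken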
while True:
try:
print("Trying port %i" % port)
#socketserver.TCPServer(('', port), handler).serve_forever()
socketserver.TCPServer.allow_reuse_address = True
rssit.persistent.storage["server"] = ThreadedTCPServer(('', port), handler)
thread = threading.Thread(target=rssit.persistent.storage["server"].serve_forever)
            thread.daemon = True
thread.start()
except OSError as exc:
if exc.errno != 98:
raise
port += 1
else:
break
def unload():
rssit.persistent.storage["server"].shutdown()
rssit.persistent.storage["server"].socket.close()
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
from bdb import Breakpoint
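# Reset bdb's global breakpoint bookkeeping so doctests that set breakpoints
# always see deterministic breakpoint numbers, regardless of which tests ran before.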
def reset_Breakpoint():
Breakpoint.next = 1
Breakpoint.bplist = {}
Breakpoint.bpbynumber = [None]
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> reset_Breakpoint()
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
    ... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_pdb_whatis_command():
"""Test the whatis command
>>> myvar = (1,2)
>>> def myfunc():
... pass
>>> class MyClass:
... def mymethod(self):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'whatis myvar',
... 'whatis myfunc',
... 'whatis MyClass',
... 'whatis MyClass()',
... 'whatis MyClass.mymethod',
... 'whatis MyClass().mymethod',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_whatis_command[3]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) whatis myvar
<class 'tuple'>
(Pdb) whatis myfunc
Function myfunc
(Pdb) whatis MyClass
Class test.test_pdb.MyClass
(Pdb) whatis MyClass()
<class 'test.test_pdb.MyClass'>
(Pdb) whatis MyClass.mymethod
Function mymethod
(Pdb) whatis MyClass().mymethod
Method mymethod
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoint is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoint is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 1 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
with open(support.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue36250(self):
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_issue26053(self):
# run command of pdb prompt echoes the correct args
script = "print('hello')"
commands = """
continue
run a b c
run d e f
quit
"""
stdout, stderr = self.run_pdb_script(script, commands)
res = '\n'.join([x.strip() for x in stdout.splitlines()])
self.assertRegex(res, "Restarting .* with arguments:\na b c")
self.assertRegex(res, "Restarting .* with arguments:\nd e f")
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn(b"NameError: name 'invalid' is not defined",
stdout)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with support.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def test_issue42384(self):
'''When running `python foo.py` sys.path[0] is an absolute path. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with support.temp_cwd() as cwd:
expected = f'(Pdb) sys.path[0] is {os.path.realpath(cwd)}'
stdout, stderr = self.run_pdb_script(script, commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
@support.skip_unless_symlink
def test_issue42384_symlink(self):
'''When running `python foo.py` sys.path[0] resolves symlinks. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with support.temp_cwd() as cwd:
cwd = os.path.realpath(cwd)
dir_one = os.path.join(cwd, 'dir_one')
dir_two = os.path.join(cwd, 'dir_two')
expected = f'(Pdb) sys.path[0] is {dir_one}'
os.mkdir(dir_one)
with open(os.path.join(dir_one, 'foo.py'), 'w') as f:
f.write(script)
os.mkdir(dir_two)
os.symlink(os.path.join(dir_one, 'foo.py'), os.path.join(dir_two, 'foo.py'))
stdout, stderr = self._run_pdb([os.path.join('dir_two', 'foo.py')], commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
def test_issue42383(self):
with support.temp_cwd() as cwd:
with open('foo.py', 'w') as f:
s = textwrap.dedent("""
print('The correct file was executed')
import os
os.chdir("subdir")
""")
f.write(s)
subdir = os.path.join(cwd, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(subdir, 'subdir'))
wrong_file = os.path.join(subdir, 'foo.py')
with open(wrong_file, 'w') as f:
f.write('print("The wrong file was executed")')
stdout, stderr = self._run_pdb(['foo.py'], 'c\nc\nq')
expected = '(Pdb) The correct file was executed'
self.assertEqual(stdout.split('\n')[6].rstrip('\r'), expected)
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
import_wikidata.py
|
"""
Import a Wikidata file into a KGTK file
TODO: references
TODO: qualifiers-order
TODO: incorporate calendar into the KGTK data model.
TODO: Incorporate geographic precision into the KGTK data model.
TODO: Incorporate URLs into the KGTK data model.
TODO: Node type needs to be optional in the edge file.
See:
https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON
https://www.wikidata.org/wiki/Special:ListDatatypes
https://www.wikidata.org/wiki/Help:Data_type
"""
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
return {
'help': 'Import a Wikidata file into a KGTK file'
}
def add_arguments(parser: KGTKArgumentParser):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.io.kgtkreader import KgtkReaderOptions
from kgtk.io.kgtkwriter import KgtkWriter
parser.add_input_file(positional=True, who='input path file (may be .bz2)')
parser.add_argument(
'--procs',
action="store",
type=int,
dest="procs",
default=2,
help='number of processes to run in parallel, default %(default)d')
parser.add_argument(
'--max-size-per-mapper-queue',
action="store",
type=int,
dest="max_size_per_mapper_queue",
default=4,
help='max depth of server queues, default %(default)d')
parser.add_argument(
'--mapper-batch-size',
action="store",
type=int,
dest="mapper_batch_size",
default=5,
help='How many statements to queue in a batch to a worker. (default=%(default)d)')
parser.add_argument(
"--single-mapper-queue",
nargs='?',
type=optional_bool,
dest="single_mapper_queue",
const=True,
default=False,
metavar="True/False",
help="If true, use a single queue for worker tasks. If false, each worker has its own task queue. (default=%(default)s).",
)
parser.add_argument(
"--collect-results",
nargs='?',
type=optional_bool,
dest="collect_results",
const=True,
default=False,
metavar="True/False",
help="If true, collect the results before writing to disk. If false, write results to disk, then concatenate. (default=%(default)s).",
)
parser.add_argument(
"--collect-seperately",
nargs='?',
type=optional_bool,
dest="collect_seperately",
const=True,
default=False,
metavar="True/False",
help="If true, collect the node, edge, and qualifier results using seperate processes. If false, collect the results with a single process. (default=%(default)s).",
)
parser.add_argument(
'--collector-batch-size',
action="store",
type=int,
dest="collector_batch_size",
default=5,
help='How many statements to queue in a batch to the collector. (default=%(default)d)')
parser.add_argument(
"--use-shm",
nargs='?',
type=optional_bool,
dest="use_shm",
const=True,
default=False,
metavar="True/False",
help="If true, use ShmQueue. (default=%(default)s).")
parser.add_argument(
'--collector-queue-per-proc-size',
action="store",
type=int,
dest="collector_queue_per_proc_size",
default=2,
help='collector queue depth per proc, default %(default)d')
parser.add_argument(
"--node", '--node-file',
action="store",
type=str,
dest="node_file",
default=None,
help='path to output node file')
parser.add_argument(
"--edge", '--edge-file', '--detailed-edge-file',
action="store",
type=str,
dest="detailed_edge_file",
default=None,
help='path to output edge file with detailed data')
parser.add_argument(
'--minimal-edge-file',
action="store",
type=str,
dest="minimal_edge_file",
default=None,
help='path to output edge file with minimal data')
parser.add_argument(
"--qual", '--qual-file', '--detailed-qual-file',
action="store",
type=str,
dest="detailed_qual_file",
default=None,
help='path to output qualifier file with full data')
parser.add_argument(
'--minimal-qual-file',
action="store",
type=str,
dest="minimal_qual_file",
default=None,
help='path to output qualifier file with minimal data')
# Optionally write only the ID column to the node file.
parser.add_argument(
'--node-file-id-only',
nargs='?',
type=optional_bool,
dest="node_id_only",
const=True,
default=False,
metavar="True/False",
help='Option to write only the node ID in the node file. (default=%(default)s)')
# The remaining files are KGTK edge files that split out
# special properties, removing them from the edge file.
parser.add_argument(
'--split-alias-file',
action="store",
type=str,
dest="split_alias_file",
default=None,
help='path to output split alias file')
parser.add_argument(
'--split-en-alias-file',
action="store",
type=str,
dest="split_en_alias_file",
default=None,
help='path to output split English alias file')
parser.add_argument(
'--split-datatype-file',
action="store",
type=str,
dest="split_datatype_file",
default=None,
help='path to output split datatype file')
parser.add_argument(
'--split-description-file',
action="store",
type=str,
dest="split_description_file",
default=None,
help='path to output split description file')
parser.add_argument(
'--split-en-description-file',
action="store",
type=str,
dest="split_en_description_file",
default=None,
help='path to output split English description file')
parser.add_argument(
'--split-label-file',
action="store",
type=str,
dest="split_label_file",
default=None,
help='path to output split label file')
parser.add_argument(
'--split-en-label-file',
action="store",
type=str,
dest="split_en_label_file",
default=None,
help='path to output split English label file')
parser.add_argument(
'--split-sitelink-file',
action="store",
type=str,
dest="split_sitelink_file",
default=None,
help='path to output split sitelink file')
parser.add_argument(
'--split-en-sitelink-file',
action="store",
type=str,
dest="split_en_sitelink_file",
default=None,
help='path to output split English sitelink file')
parser.add_argument(
'--split-type-file', '--split-entity-type-file',
action="store",
type=str,
dest="split_type_file",
default=None,
help='path to output split entry type file')
parser.add_argument(
'--split-property-edge-file',
action="store",
type=str,
dest="split_property_edge_file",
default=None,
help='path to output split property edge file')
parser.add_argument(
'--split-property-qual-file',
action="store",
type=str,
dest="split_property_qual_file",
default=None,
help='path to output split property qualifier file')
# TODO: Create a separate file for the sitelinks.
parser.add_argument(
"--limit",
action="store",
type=int,
dest="limit",
default=None,
help='number of lines of input file to run on, default runs on all')
parser.add_argument(
"--lang",
action="store",
type=str,
dest="lang",
default="en",
help='languages to extract, comma separated, default en')
parser.add_argument(
"--source",
action="store",
type=str,
dest="source",
default="wikidata",
help='wikidata version number, default: wikidata')
parser.add_argument(
"--deprecated",
action="store_true",
dest="deprecated",
help='option to include deprecated statements, not included by default')
parser.add_argument(
"--explode-values",
nargs='?',
type=optional_bool,
dest="explode_values",
const=True,
default=True,
metavar="True/False",
help="If true, create columns with exploded value information. (default=%(default)s).",
)
parser.add_argument(
"--use-python-cat",
nargs='?',
type=optional_bool,
dest="use_python_cat",
const=True,
default=False,
metavar="True/False",
help="If true, use portable code to combine file fragments. (default=%(default)s).",
)
parser.add_argument(
"--keep-temp-files",
nargs='?',
type=optional_bool,
dest="keep_temp_files",
const=True,
default=False,
metavar="True/False",
help="If true, keep temporary files (for debugging). (default=%(default)s).",
)
parser.add_argument(
"--skip-processing",
nargs='?',
type=optional_bool,
dest="skip_processing",
const=True,
default=False,
metavar="True/False",
help="If true, skip processing the input file (for debugging). (default=%(default)s).",
)
parser.add_argument(
"--skip-merging",
nargs='?',
type=optional_bool,
dest="skip_merging",
const=True,
default=False,
metavar="True/False",
help="If true, skip merging temporary files (for debugging). (default=%(default)s).",
)
parser.add_argument(
"--interleave",
nargs='?',
type=optional_bool,
dest="interleave",
const=True,
default=False,
metavar="True/False",
help="If true, output the edges and qualifiers in a single file (the edge file). (default=%(default)s).",
)
parser.add_argument(
"--entry-type-edges",
nargs='?',
type=optional_bool,
dest="entry_type_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for the entry type field. (default=%(default)s).",
)
parser.add_argument(
"--alias-edges",
nargs='?',
type=optional_bool,
dest="alias_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for aliases. (default=%(default)s).",
)
parser.add_argument(
"--datatype-edges",
nargs='?',
type=optional_bool,
dest="datatype_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for property datatypes. (default=%(default)s).",
)
parser.add_argument(
"--description-edges",
nargs='?',
type=optional_bool,
dest="descr_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for descriptions. (default=%(default)s).",
)
parser.add_argument(
"--label-edges",
nargs='?',
type=optional_bool,
dest="label_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for labels. (default=%(default)s).",
)
parser.add_argument(
"--sitelink-edges",
nargs='?',
type=optional_bool,
dest="sitelink_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for sitelinks. (default=%(default)s).",
)
parser.add_argument(
"--sitelink-verbose-edges",
nargs='?',
type=optional_bool,
dest="sitelink_verbose_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for sitelink details (lang, site, badges). (default=%(default)s).",
)
parser.add_argument(
"--sitelink-verbose-qualifiers",
nargs='?',
type=optional_bool,
dest="sitelink_verbose_qualifiers",
const=True,
default=False,
metavar="True/False",
help="If true, create qualifier records for sitelink details (lang, site, badges). (default=%(default)s).",
)
parser.add_argument(
"--parse-aliases",
nargs='?',
type=optional_bool,
dest="parse_aliases",
const=True,
default=True,
metavar="True/False",
help="If true, parse aliases. (default=%(default)s).",
)
parser.add_argument(
"--parse-descriptions",
nargs='?',
type=optional_bool,
dest="parse_descr",
const=True,
default=True,
metavar="True/False",
help="If true, parse descriptions. (default=%(default)s).",
)
parser.add_argument(
"--parse-labels",
nargs='?',
type=optional_bool,
dest="parse_labels",
const=True,
default=True,
metavar="True/False",
help="If true, parse labels. (default=%(default)s).",
)
parser.add_argument(
"--parse-sitelinks",
nargs='?',
type=optional_bool,
dest="parse_sitelinks",
const=True,
default=True,
metavar="True/False",
help="If true, parse sitelinks. (default=%(default)s).",
)
parser.add_argument(
"--parse-claims",
nargs='?',
type=optional_bool,
dest="parse_claims",
const=True,
default=True,
metavar="True/False",
help="If true, parse claims. (default=%(default)s).",
)
parser.add_argument(
"--fail-if-missing",
nargs='?',
type=optional_bool,
dest="fail_if_missing",
const=True,
default=True,
metavar="True/False",
help="If true, fail if expected data is missing. (default=%(default)s).",
)
parser.add_argument(
"--all-languages",
nargs='?',
type=optional_bool,
dest="all_languages",
const=True,
default=False,
metavar="True/False",
help="If true, override --lang and import aliases, dscriptions, and labels in all languages. (default=%(default)s).",
)
parser.add_argument(
"--warn-if-missing",
nargs='?',
type=optional_bool,
dest="warn_if_missing",
const=True,
default=True,
metavar="True/False",
help="If true, print a warning message if expected data is missing. (default=%(default)s).",
)
parser.add_argument(
'--progress-interval',
action="store",
type=int,
dest="progress_interval",
default=500000,
help='How often to report progress. (default=%(default)d)')
parser.add_argument(
"--use-kgtkwriter",
nargs='?',
type=optional_bool,
dest="use_kgtkwriter",
const=True,
default=True,
metavar="True/False",
help="If true, use KgtkWriter instead of csv.writer. (default=%(default)s).")
parser.add_argument(
"--use-mgzip-for-input",
nargs='?',
type=optional_bool,
dest="use_mgzip_for_input",
const=True,
default=False,
metavar="True/False",
help="If true, use the multithreaded gzip package, mgzip, for input. (default=%(default)s).")
parser.add_argument(
"--use-mgzip-for-output",
nargs='?',
type=optional_bool,
dest="use_mgzip_for_output",
const=True,
default=False,
metavar="True/False",
help="If true, use the multithreaded gzip package, mgzip, for output. (default=%(default)s).")
parser.add_argument(
"--mgzip-threads-for-input",
type=int,
default=KgtkReaderOptions.MGZIP_THREAD_COUNT_DEFAULT,
dest="mgzip_threads_for_input",
help="The number of threads per mgzip input streama. (default=%(default)s).")
parser.add_argument(
"--mgzip-threads-for-output",
type=int,
default=KgtkWriter.MGZIP_THREAD_COUNT_DEFAULT,
dest="mgzip_threads_for_output",
help="The number of threads per mgzip output streama. (default=%(default)s).")
parser.add_argument(
'--value-hash-width',
action="store",
type=int,
dest="value_hash_width",
default=8,
help='How many characters should be used in a value hash? (default=%(default)d)')
parser.add_argument(
'--claim-id-hash-width',
action="store",
type=int,
dest="claim_id_hash_width",
default=0,
help='How many characters should be used to hash the claim ID? 0 means do not hash the claim ID. (default=%(default)d)')
def custom_progress()->bool:
return True # We want to start a custom progress monitor.
def run(input_file: KGTKFiles,
procs: int,
max_size_per_mapper_queue: int,
node_file: typing.Optional[str],
detailed_edge_file: typing.Optional[str],
minimal_edge_file: typing.Optional[str],
detailed_qual_file: typing.Optional[str],
minimal_qual_file: typing.Optional[str],
node_id_only: bool,
split_alias_file: typing.Optional[str],
split_en_alias_file: typing.Optional[str],
split_datatype_file: typing.Optional[str],
split_description_file: typing.Optional[str],
split_en_description_file: typing.Optional[str],
split_label_file: typing.Optional[str],
split_en_label_file: typing.Optional[str],
split_sitelink_file: typing.Optional[str],
split_en_sitelink_file: typing.Optional[str],
split_type_file: typing.Optional[str],
split_property_edge_file: typing.Optional[str],
split_property_qual_file: typing.Optional[str],
limit: int,
lang: str,
source: str,
deprecated: bool,
explode_values: bool,
use_python_cat: bool,
keep_temp_files: bool,
skip_processing: bool,
skip_merging: bool,
interleave: bool,
entry_type_edges: bool,
alias_edges: bool,
datatype_edges: bool,
descr_edges: bool,
label_edges: bool,
sitelink_edges: bool,
sitelink_verbose_edges: bool,
sitelink_verbose_qualifiers: bool,
parse_aliases: bool,
parse_descr: bool,
parse_labels: bool,
parse_sitelinks: bool,
parse_claims: bool,
fail_if_missing: bool,
all_languages: bool,
warn_if_missing: bool,
collect_results: bool,
collect_seperately: bool,
collector_queue_per_proc_size: int,
progress_interval: int,
use_shm: bool,
mapper_batch_size: int,
collector_batch_size: int,
single_mapper_queue: bool,
use_kgtkwriter: bool,
use_mgzip_for_input: bool,
use_mgzip_for_output: bool,
mgzip_threads_for_input: int,
mgzip_threads_for_output: int,
value_hash_width: int,
claim_id_hash_width: int,
):
# import modules locally
import bz2
import simplejson as json
import csv
import hashlib
import multiprocessing as mp
import os
from pathlib import Path
import pyrallel
import sys
import time
from kgtk.kgtkformat import KgtkFormat
from kgtk.cli_argparse import KGTKArgumentParser
from kgtk.cli_entry import progress_startup
from kgtk.exceptions import KGTKException
from kgtk.utils.cats import platform_cat
languages=lang.split(',')
ADDL_SITELINK_LABEL: str = "addl_wikipedia_sitelink"
ALIAS_LABEL: str = "alias"
DATATYPE_LABEL: str = "datatype"
DESCRIPTION_LABEL: str = "description"
LABEL_LABEL: str = "label"
SITELINK_LABEL: str = "wikipedia_sitelink"
SITELINK_BADGE_LABEL: str = "sitelink-badge"
SITELINK_LANGUAGE_LABEL: str = "sitelink-language"
SITELINK_SITE_LABEL: str = "sitelink-site"
SITELINK_TITLE_LABEL: str = "sitelink-title"
TYPE_LABEL: str = "type"
collector_q: typing.Optional[pyrallel.ShmQueue] = None
node_collector_q: typing.Optional[pyrallel.ShmQueue] = None
edge_collector_q: typing.Optional[pyrallel.ShmQueue] = None
qual_collector_q: typing.Optional[pyrallel.ShmQueue] = None
description_collector_q: typing.Optional[pyrallel.ShmQueue] = None
sitelink_collector_q: typing.Optional[pyrallel.ShmQueue] = None
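# Worker/collector overview: MyMapper worker processes parse one JSON line of
# the Wikidata dump at a time and emit node, edge, and qualifier rows.  When
# --collect-results is enabled, those rows are sent over the queues above to
# one or more MyCollector processes that write the output files; otherwise each
# worker writes its own per-process file fragments, which are concatenated
# after processing unless --skip-merging is given.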
class MyMapper(pyrallel.Mapper):
def enter(self):
print("Starting worker process {} (pid {}).".format(self._idx, os.getpid()), file=sys.stderr, flush=True)
self.first=True
self.cnt=0
self.write_mode='w'
self.node_f = None
if node_file and not collect_results:
self.node_f = open(node_file+'_{}'.format(self._idx), self.write_mode, newline='')
self.node_wr = csv.writer(
self.node_f,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
self.edge_f = None
if detailed_edge_file and not collect_results:
self.edge_f = open(detailed_edge_file+'_{}'.format(self._idx), self.write_mode, newline='')
self.edge_wr = csv.writer(
self.edge_f,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
self.qual_f = None
if detailed_qual_file and not collect_results:
self.qual_f = open(detailed_qual_file+'_{}'.format(self._idx), self.write_mode, newline='')
self.qual_wr = csv.writer(
self.qual_f,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
if collect_results and collector_batch_size > 1:
self.collector_batch_cnt = 0
self.collector_nrows_batch = [ ]
self.collector_erows_batch = [ ]
self.collector_qrows_batch = [ ]
self.collector_description_erows_batch = [ ]
self.collector_sitelink_erows_batch = [ ]
self.process_row_data = \
node_file or \
entry_type_edges or \
label_edges or \
alias_edges or \
descr_edges
def exit(self, *args, **kwargs):
print("Exiting worker process {} (pid {}).".format(self._idx, os.getpid()), file=sys.stderr, flush=True)
if collect_results:
if collector_batch_size > 1:
if len(self.collector_nrows_batch) > 0 or len(self.collector_erows_batch) > 0 or len(self.collector_qrows_batch) > 0:
if collect_seperately:
if len(self.collector_nrows_batch) > 0:
node_collector_q.put(("rows", self.collector_nrows_batch, [], [], None))
if len(self.collector_erows_batch) > 0:
edge_collector_q.put(("rows", [], self.collector_erows_batch, [], None))
if len(self.collector_qrows_batch) > 0:
qual_collector_q.put(("rows", [], [], self.collector_qrows_batch, None))
if len(self.collector_description_erows_batch) > 0:
description_collector_q.put(("rows", [], self.collector_description_erows_batch, [], None))
if len(self.collector_sitelink_erows_batch) > 0:
sitelink_collector_q.put(("rows", [], self.collector_sitelink_erows_batch, [], None))
else:
collector_q.put(("rows", self.collector_nrows_batch, self.collector_erows_batch, self.collector_qrows_batch, None))
else:
if self.node_f is not None:
self.node_f.close()
if self.edge_f is not None:
self.edge_f.close()
if self.qual_f is not None:
self.qual_f.close()
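# erows_append() emits a single edge row.  With --explode-values (the default)
# the row carries separate columns for magnitude, unit, date, coordinates,
# precision, etc.; otherwise a more compact layout with claim_id, val_type,
# and datahash columns is written instead.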
def erows_append(self, erows, edge_id, node1, label, node2,
rank="",
magnitude="",
unit="",
date="",
item="",
lower="",
upper="",
latitude="",
longitude="",
wikidatatype="",
claim_id="",
claim_type="",
val_type="",
entity_type="",
datahash="",
precision="",
calendar="",
entrylang=""
):
if len(claim_type) > 0 and claim_type != "statement":
raise ValueError("Unexpected claim type %s" % claim_type)
if explode_values:
erows.append([edge_id,
node1,
label,
node2,
rank,
magnitude,
unit,
date,
item,
lower,
upper,
latitude,
longitude,
precision,
calendar,
entity_type,
wikidatatype,
entrylang,
]
)
else:
erows.append([edge_id,
node1,
label,
node2,
rank,
wikidatatype,
claim_id,
# claim_type,
val_type,
entity_type,
datahash,
precision,
calendar,
entrylang,
]
)
def qrows_append(self, qrows, edge_id, node1, label, node2,
magnitude="",
unit="",
date="",
item="",
lower="",
upper="",
latitude="",
longitude="",
wikidatatype="",
val_type="",
entity_type="",
datahash="",
precision="",
calendar="",
):
if minimal_qual_file is not None or detailed_qual_file is not None:
if explode_values:
qrows.append([edge_id,
node1,
label,
node2,
magnitude,
unit,
date,
item,
lower,
upper,
latitude,
longitude,
precision,
calendar,
entity_type,
wikidatatype,
])
else:
qrows.append([edge_id,
node1,
label,
node2,
wikidatatype,
val_type,
entity_type,
datahash,
precision,
calendar,
])
if interleave:
self.erows_append(erows,
edge_id=edge_id,
node1=node1,
label=label,
node2=node2,
magnitude=magnitude,
unit=unit,
date=date,
item=item,
lower=lower,
upper=upper,
latitude=latitude,
longitude=longitude,
wikidatatype=wikidatatype,
entity_type=entity_type,
datahash=datahash,
precision=precision,
calendar=calendar)
# def process(self,line,node_file,edge_file,qual_file,languages,source):
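# process() handles one line of the Wikidata JSON dump: strip any trailing
# comma, parse the JSON object, and build node rows plus label, description,
# alias, datatype, sitelink, claim, and qualifier edge rows, which are then
# handed to the collectors or written by this worker's own csv writers.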
def process(self, line):
if progress_interval > 0 and self.cnt % progress_interval == 0 and self.cnt>0:
print("{} lines processed by processor {}".format(self.cnt,self._idx), file=sys.stderr, flush=True)
self.cnt+=1
# csv_line_terminator = "\n" if os.name == 'posix' else "\r\n"
nrows=[]
erows=[]
qrows=[]
description_erows = []
sitelink_erows = []
# These maps avoid ID collisions due to hash collisions or
# repeated values in the input data. We assume that a top-level
# property (obj["id"]) will not occur in multiple input lines.
alias_id_collision_map: typing.MutableMapping[str, int] = dict()
edge_id_collision_map: typing.MutableMapping[str, int] = dict()
qual_id_collision_map: typing.MutableMapping[str, int] = dict()
sitelink_id_collision_map: typing.MutableMapping[str, int] = dict()
clean_line = line.strip()
if clean_line.endswith(b","):
clean_line = clean_line[:-1]
if len(clean_line) > 1:
obj = json.loads(clean_line)
entry_type = obj["type"]
keep: bool = False
if entry_type == "item" or entry_type == "property":
keep = True
elif warn_if_missing:
print("Unknown object type {}.".format(entry_type), file=sys.stderr, flush=True)
if self.process_row_data and keep:
row = []
qnode = obj["id"]
row.append(qnode)
if parse_labels:
labels = obj.get("labels")
if labels is None:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its labels" % qnode)
elif warn_if_missing:
print("Object id {} has no labels.".format(qnode), file=sys.stderr, flush=True)
label_list=[]
if labels:
if all_languages:
label_languages = labels.keys()
else:
label_languages = languages
for lang in label_languages:
lang_label = labels.get(lang, None)
if lang_label:
# We needn't worry about duplicate label entries if this check passes.
if lang_label['language'] != lang:
print("*** Conflicting language key %s for the %s label for %s" % (repr(lang_label['language']), repr(lang), qnode),
file=sys.stderr, flush=True)
# lang_label['value']=lang_label['value'].replace('|','\\|')
# label_list.append('\'' + lang_label['value'].replace("'","\\'") + '\'' + "@" + lang)
value = KgtkFormat.stringify(lang_label['value'], language=lang)
label_list.append(value)
if label_edges:
langid: str = qnode + '-' + LABEL_LABEL + '-' + lang
self.erows_append(erows,
edge_id=langid,
node1=qnode,
label=LABEL_LABEL,
node2=value,
entrylang=lang)
if not node_id_only:
if len(label_list)>0:
row.append("|".join(label_list))
else:
row.append("")
if not node_id_only:
row.append(entry_type)
if entry_type_edges:
typeid: str = qnode + '-' + TYPE_LABEL + '-' + entry_type
self.erows_append(erows,
edge_id=typeid,
node1=qnode,
label=TYPE_LABEL,
node2=entry_type)
if parse_descr:
descriptions = obj.get("descriptions")
if descriptions is None:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its descriptions" % qnode)
elif warn_if_missing:
print("Object id {} has no descriptions.".format(qnode), file=sys.stderr, flush=True)
descr_list=[]
if descriptions:
if all_languages:
desc_languages = descriptions.keys()
else:
desc_languages = languages
for lang in desc_languages:
lang_descr = descriptions.get(lang, None)
if lang_descr:
# We needn't worry about duplicate description entries if this check passes.
if lang_descr['language'] != lang:
print("*** Conflicting language key %s for the %s description for %s" % (repr(lang_descr['language']), repr(lang), qnode),
file=sys.stderr, flush=True)
# lang_descr['value']=lang_descr['value'].replace('|','\\|')
# descr_list.append('\'' + lang_descr['value'].replace("'","\\'") + '\'' + "@" + lang)
value = KgtkFormat.stringify(lang_descr['value'], language=lang)
descr_list.append(value)
if descr_edges:
descrid: str = qnode + '-' + DESCRIPTION_LABEL + '-' + lang
self.erows_append(description_erows if collect_seperately else erows,
edge_id=descrid,
node1=qnode,
label=DESCRIPTION_LABEL,
node2=value,
entrylang=lang)
if not node_id_only:
if len(descr_list)>0:
row.append("|".join(descr_list))
else:
row.append("")
if parse_aliases:
aliases = obj.get("aliases")
if aliases is None:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its aliases" % qnode)
elif warn_if_missing:
print("Object id {} has no aliasees.".format(qnode), file=sys.stderr, flush=True)
alias_list = []
if aliases:
if all_languages:
alias_languages = aliases.keys()
else:
alias_languages = languages
for lang in alias_languages:
lang_aliases = aliases.get(lang, None)
if lang_aliases:
for item in lang_aliases:
# item['value']=item['value'].replace('|','\\|')
# alias_list.append('\'' + item['value'].replace("'","\\'") + '\'' + "@" + lang)
value = KgtkFormat.stringify(item['value'], language=lang)
alias_list.append(value)
if alias_edges:
# Hash the value to save space and avoid syntactic difficulties.
# Take a subset of the hash value to save space.
alias_value_hash: str = hashlib.sha256(value.encode('utf-8')).hexdigest()[:value_hash_width]
aliasid = qnode + '-' + ALIAS_LABEL + "-" + lang + '-' + alias_value_hash
alias_seq_no: int # In case of hash collision
if aliasid in alias_id_collision_map:
alias_seq_no = alias_id_collision_map[aliasid]
print("\n*** Alias collision #%d detected for %s (%s)" % (alias_seq_no, aliasid, value), file=sys.stderr, flush=True)
else:
alias_seq_no = 0
alias_id_collision_map[aliasid] = alias_seq_no + 1
aliasid += '-' + str(alias_seq_no)
self.erows_append(erows,
edge_id=aliasid,
node1=qnode,
label=ALIAS_LABEL,
node2=value,
entrylang=lang)
if not node_id_only:
if len(alias_list)>0:
row.append("|".join(alias_list))
else:
row.append("")
datatype = obj.get("datatype", "")
if not node_id_only:
row.append(datatype)
if len(datatype) > 0 and datatype_edges:
datatypeid: str = qnode + '-' + "datatype"
# We expect the datatype to be a valid KGTK symbol, so
# there's no need to stringify it.
self.erows_append(erows,
edge_id=datatypeid,
node1=qnode,
label=DATATYPE_LABEL,
node2=datatype)
#row.append(source)
if node_file:
nrows.append(row)
if parse_claims and "claims" not in obj:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its claims" % qnode)
elif warn_if_missing:
print("Object id {} is missing its claims.".format(qnode), file=sys.stderr, flush=True)
if parse_claims and "claims" in obj:
claims = obj["claims"]
if keep:
qnode = obj["id"]
for prop, claim_property in claims.items():
for cp in claim_property:
if (deprecated or cp['rank'] != 'deprecated'):
snaktype = cp['mainsnak']['snaktype']
rank=cp['rank']
claim_id = cp['id']
claim_type = cp['type']
if claim_type != "statement":
print("Unknown claim type %s" % claim_type, file=sys.stderr, flush=True)
if snaktype == 'value':
datavalue = cp['mainsnak']['datavalue']
val = datavalue.get('value')
val_type = datavalue.get("type", "")
elif snaktype == 'somevalue':
val = None
val_type = "somevalue"
elif snaktype == 'novalue':
val = None
val_type = "novalue"
else:
raise ValueError("Unknown snaktype %s" % snaktype)
typ = cp['mainsnak']['datatype']
# if typ != val_type:
# print("typ %s != val_type %s" % (typ, val_type), file=sys.stderr, flush=True)
value = ''
mag = ''
unit = ''
date=''
item=''
lower = ''
upper = ''
precision = ''
calendar = ''
lat = ''
long = ''
enttype = ''
if val is None:
value = val_type
elif typ.startswith('wikibase'):
enttype = val.get('entity-type')
value = val.get('id', '')
item=value
elif typ == 'quantity':
value = val['amount']
mag = val['amount']
if val.get(
'upperBound',
None) or val.get(
'lowerBound',
None):
lower = val.get('lowerBound', '')
upper = val.get('upperBound', '')
value += '[' + lower + \
',' + upper + ']'
# TODO: Don't lose the single-character unit code. At a minimum, verify that it is the value "1".
if len(val.get('unit')) > 1:
unit = val.get(
'unit').split('/')[-1]
if unit not in ["undefined"]:
# TODO: don't lose track of "undefined" units.
value += unit
elif typ == 'globe-coordinate':
lat = str(val['latitude'])
long = str(val['longitude'])
precision = str(val.get('precision', ''))
value = '@' + lat + '/' + long
# TODO: what about "globe"?
elif typ == 'time':
if val['time'][0]=='-':
pre="^-"
else:
pre="^"
date = pre + val['time'][1:]
precision = str(val['precision'])
calendar = val.get(
'calendarmodel', '').split('/')[-1]
value = pre + \
val['time'][1:] + '/' + str(val['precision'])
elif typ == 'monolingualtext':
# value = '\'' + \
# val['text'].replace("'","\\'").replace("|", "\\|") + '\'' + '@' + val['language']
value = KgtkFormat.stringify(val['text'], language=val['language'])
else:
# value = '\"' + val.replace('"','\\"').replace("|", "\\|") + '\"'
value = KgtkFormat.stringify(val)
if minimal_edge_file is not None or detailed_edge_file is not None:
prop_value_hash: str
if value.startswith(('P', 'Q')):
prop_value_hash = value
else:
prop_value_hash = hashlib.sha256(value.encode('utf-8')).hexdigest()[:value_hash_width]
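# Edge IDs follow the pattern node1-property-valuehash-claimid-seqno, for
# example (hypothetical values) "Q42-P31-Q5-<claim id or hash>-0"; values that
# are themselves Qxxx/Pxxx identifiers are used directly instead of hashed.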
edgeid: str = qnode + '-' + prop + '-' + prop_value_hash + '-'
if claim_id_hash_width == 0:
edgeid += claim_id.lower()
else:
edgeid += hashlib.sha256(claim_id.lower().encode('utf-8')).hexdigest()[:claim_id_hash_width]
prop_seq_no: int # In case of hash collision
if edgeid in edge_id_collision_map:
prop_seq_no = edge_id_collision_map[edgeid]
print("\n*** Edge collision #%d detected for %s (%s)" % (prop_seq_no, edgeid, value), file=sys.stderr, flush=True)
else:
prop_seq_no = 0
edge_id_collision_map[edgeid] = prop_seq_no + 1
edgeid += '-' + str(prop_seq_no)
self.erows_append(erows,
edge_id=edgeid,
node1=qnode,
label=prop,
node2=value,
rank=rank,
magnitude=mag,
unit=unit,
date=date,
item=item,
lower=lower,
upper=upper,
latitude=lat,
longitude=long,
wikidatatype=typ,
claim_id=claim_id,
claim_type=claim_type,
val_type=val_type,
entity_type=enttype,
precision=precision,
calendar=calendar)
if minimal_qual_file is not None or detailed_qual_file is not None or interleave:
if cp.get('qualifiers', None):
quals = cp['qualifiers']
for qual_prop, qual_claim_property in quals.items():
for qcp in qual_claim_property:
snaktype = qcp['snaktype']
if snaktype == 'value':
datavalue = qcp['datavalue']
val = datavalue.get('value')
val_type = datavalue.get("type", "")
elif snaktype == 'somevalue':
val = None
val_type = "somevalue"
elif snaktype == 'novalue':
val = None
val_type = "novalue"
else:
raise ValueError("Unknown qualifier snaktype %s" % snaktype)
if True:
value = ''
mag = ''
unit = ''
date= ''
item=''
lower = ''
upper = ''
precision = ''
calendar = ''
lat = ''
long = ''
enttype = ''
datahash = '"' + qcp['hash'] + '"'
typ = qcp['datatype']
if val is None:
value = val_type
elif typ.startswith(
'wikibase'):
enttype = val.get(
'entity-type')
value = val.get(
'id', '')
item=value
elif typ == 'quantity':
value = val['amount']
mag = val['amount']
if val.get(
'upperBound',
None) or val.get(
'lowerBound',
None):
lower = val.get(
'lowerBound', '')
upper = val.get(
'upperBound', '')
value += '[' + lower + \
',' + upper + ']'
if len(
val.get('unit')) > 1:
unit = val.get(
'unit').split('/')[-1]
value += unit
elif typ == 'globe-coordinate':
lat = str(
val['latitude'])
long = str(
val['longitude'])
precision = str(val.get(
'precision', ''))
value = '@' + lat + '/' + long
elif typ == 'time':
if val['time'][0]=='-':
pre="^-"
else:
pre="^"
date = pre + \
val['time'][1:]
precision = str(
val['precision'])
calendar = val.get(
'calendarmodel', '').split('/')[-1]
value = pre + \
val['time'][1:] + '/' + str(val['precision'])
elif typ == 'monolingualtext':
# value = '\'' + \
# val['text'].replace("'","\\'") + '\'' + '@' + val['language']
value = KgtkFormat.stringify(val['text'], language=val['language'])
else:
# value = '\"' + val.replace('"','\\"') + '\"'
value = KgtkFormat.stringify(val)
qual_value_hash: str
if value.startswith(('P', 'Q')):
qual_value_hash = value
else:
qual_value_hash = hashlib.sha256(value.encode('utf-8')).hexdigest()[:value_hash_width]
qualid: str = edgeid + '-' + qual_prop + '-' + qual_value_hash
qual_seq_no: int # In case of hash collision
if qualid in qual_id_collision_map:
qual_seq_no = qual_id_collision_map[qualid]
print("\n*** Qualifier collision #%d detected for %s (%s)" % (qual_seq_no, qualid, value), file=sys.stderr, flush=True)
else:
qual_seq_no = 0
qual_id_collision_map[qualid] = qual_seq_no + 1
qualid += '-' + str(qual_seq_no)
self.qrows_append(qrows,
edge_id=qualid,
node1=edgeid,
label=qual_prop,
node2=value,
magnitude=mag,
unit=unit,
date=date,
item=item,
lower=lower,
upper=upper,
latitude=lat,
longitude=long,
wikidatatype=typ,
entity_type=enttype,
datahash=datahash,
precision=precision,
calendar=calendar)
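# Sitelinks become wikipedia_sitelink (or addl_wikipedia_sitelink) edges whose
# node2 is a URL reconstructed from the site code and article title; the
# verbose options additionally emit language/site/title/badge edges or
# qualifiers attached to the sitelink edge ID.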
if parse_sitelinks:
sitelinks=obj.get('sitelinks',None)
else:
sitelinks = None
if sitelinks:
for link in sitelinks:
# TODO: If the title might contain vertical bar, more work is needed
# to make the sitetitle safe for KGTK.
if link.endswith('wiki') and link not in ('commonswiki', 'simplewiki'):
linklabel = SITELINK_LABEL
sitetitle='_'.join(sitelinks[link]['title'].split())
# The following leads to ambiguity if there are both
# "afwiki" and "afwikibooks".
#
# TODO: Need to research the sitelink structure more fully.
sitelang=link.split('wiki')[0].replace('_','-')
sitelink='http://'+sitelang+'.wikipedia.org/wiki/'+sitetitle
else:
linklabel = ADDL_SITELINK_LABEL
sitetitle='_'.join(sitelinks[link]['title'].split())
if "wiki" in link:
# TODO: needs more work here.
sitelang=link.split("wiki")[0]
if sitelang in ("commons", "simple"):
sitelang = "en" # TODO: Need to retain the distinction we lose here.
else:
sitelang=""
sitehost=link+'.org' # TODO: Needs more work here
sitelink = 'http://'+sitehost+'/wiki/'+sitetitle
if sitelink is not None:
serows = sitelink_erows if collect_seperately else erows
sitelink_value_hash: str = hashlib.sha256(sitelink.encode('utf-8')).hexdigest()[:value_hash_width]
sitelinkid: str = qnode + '-' + linklabel + '-' + sitelink_value_hash
sitelink_seq_no: int = 0
if sitelinkid in sitelink_id_collision_map:
sitelink_seq_no = sitelink_id_collision_map[sitelinkid]
print("\n*** Sitelink collision #%d detected for %s (%s)" % (sitelink_seq_no, sitelinkid, sitelink), file=sys.stderr, flush=True)
else:
sitelink_seq_no = 0
sitelink_id_collision_map[sitelinkid] = sitelink_seq_no + 1
sitelinkid += '-' + str(sitelink_seq_no)
if sitelink_edges:
self.erows_append(serows,
edge_id=sitelinkid,
node1=qnode,
label=linklabel,
node2=sitelink,
entrylang=sitelang)
if sitelink_verbose_edges:
if len(sitelang) > 0:
self.erows_append(serows,
edge_id=sitelinkid + '-language-0',
node1=sitelinkid,
label=SITELINK_LANGUAGE_LABEL,
node2=sitelang,
entrylang=sitelang)
self.erows_append(serows,
edge_id=sitelinkid + '-site-0',
node1=sitelinkid,
label=SITELINK_SITE_LABEL,
node2=link,
entrylang=sitelang)
self.erows_append(serows,
edge_id=sitelinkid + '-title-0',
node1=sitelinkid,
label=SITELINK_TITLE_LABEL,
node2=KgtkFormat.stringify(sitelinks[link]['title']),
entrylang=sitelang)
for badge in sitelinks[link]['badges']:
badgeid = sitelinkid + '-badge-' + badge
self.erows_append(serows,
edge_id=badgeid,
node1=sitelinkid,
label=SITELINK_BADGE_LABEL,
node2=badge,
entrylang=sitelang)
if sitelink_verbose_qualifiers:
if len(sitelang) > 0:
self.qrows_append(qrows,
edge_id=sitelinkid + '-language-0',
node1=sitelinkid,
label=SITELINK_LANGUAGE_LABEL,
node2=sitelang)
self.qrows_append(qrows,
edge_id=sitelinkid + '-site-0',
node1=sitelinkid,
label=SITELINK_SITE_LABEL,
node2=link)
self.qrows_append(qrows,
edge_id=sitelinkid + '-title-0',
node1=sitelinkid,
label=SITELINK_TITLE_LABEL,
node2=KgtkFormat.stringify(sitelinks[link]['title']))
for badge in sitelinks[link]['badges']:
badgeid = sitelinkid + '-badge-' + badge
self.qrows_append(qrows,
edge_id=badgeid,
node1=sitelinkid,
label=SITELINK_BADGE_LABEL,
node2=badge)
if len(nrows) > 0 or len(erows) > 0 or len(qrows) > 0 or len(description_erows) > 0 or len(sitelink_erows) > 0:
if collect_results:
if collector_batch_size == 1:
if collect_seperately:
if len(nrows) > 0 and node_collector_q is not None:
node_collector_q.put(("rows", nrows, [], [], None))
if len(erows) > 0 and edge_collector_q is not None:
edge_collector_q.put(("rows", [], erows, [], None))
if len(qrows) > 0 and qual_collector_q is not None:
qual_collector_q.put(("rows", nrows, [], [], None))
if len(description_erows) > 0 and description_collector_q is not None:
description_collector_q.put(("rows", [], description_erows, [], None))
if len(sitelink_erows) > 0 and sitelink_collector_q is not None:
sitelink_collector_q.put(("rows", [], sitelink_erows, [], None))
elif collector_q is not None:
collector_q.put(("rows", nrows, erows, qrows, None))
else:
self.collector_nrows_batch.extend(nrows)
self.collector_erows_batch.extend(erows)
self.collector_qrows_batch.extend(qrows)
if collect_seperately:
self.collector_description_erows_batch.extend(description_erows)
self.collector_sitelink_erows_batch.extend(sitelink_erows)
self.collector_batch_cnt += 1
if self.collector_batch_cnt >= collector_batch_size:
if collect_seperately:
if len(self.collector_nrows_batch) > 0 and node_collector_q is not None:
node_collector_q.put(("rows", self.collector_nrows_batch, [], [], None))
if len(self.collector_erows_batch) > 0 and edge_collector_q is not None:
edge_collector_q.put(("rows", [], self.collector_erows_batch, [], None))
if len(self.collector_qrows_batch) > 0 and qual_collector_q is not None:
qual_collector_q.put(("rows", [], [], self.collector_qrows_batch, None))
if len(self.collector_description_erows_batch) > 0 and description_collector_q is not None:
description_collector_q.put(("rows", [], self.collector_description_erows_batch, [], None))
self.collector_description_erows_batch.clear()
if len(self.collector_sitelink_erows_batch) > 0 and sitelink_collector_q is not None:
sitelink_collector_q.put(("rows", [], self.collector_sitelink_erows_batch, [], None))
self.collector_sitelink_erows_batch.clear()
elif collector_q is not None:
collector_q.put(("rows", self.collector_nrows_batch, self.collector_erows_batch, self.collector_qrows_batch, None))
self.collector_nrows_batch.clear()
self.collector_erows_batch.clear()
self.collector_qrows_batch.clear()
self.collector_batch_cnt = 0
else:
if node_file:
for row in nrows:
self.node_wr.writerow(row)
if detailed_edge_file:
for row in erows:
self.edge_wr.writerow(row)
if detailed_qual_file:
for row in qrows:
self.qual_wr.writerow(row)
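# MyCollector runs in its own process: it pulls (action, nrows, erows, qrows,
# header) messages from its queue, opens the requested output files on the
# *_header actions, appends rows on "rows" actions, and closes all writers on
# "shutdown".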
class MyCollector:
def __init__(self):
# Prepare to use the collector.
self.node_f: typing.Optional[typing.TextIO] = None
self.node_wr = None
self.nrows: int = 0
self.minimal_edge_f: typing.Optional[typing.TextIO] = None
self.minimal_edge_wr = None
self.detailed_edge_f: typing.Optional[typing.TextIO] = None
self.detailed_edge_wr = None
self.erows: int = 0
self.minimal_qual_f: typing.Optional[typing.TextIO] = None
self.minimal_qual_wr = None
self.detailed_qual_f: typing.Optional[typing.TextIO] = None
self.detailed_qual_wr = None
self.qrows: int = 0
self.split_alias_f: typing.Optional[typing.TextIO] = None
self.split_alias_wr = None
self.n_alias_rows: int = 0
self.split_en_alias_f: typing.Optional[typing.TextIO] = None
self.split_en_alias_wr = None
self.n_en_alias_rows: int = 0
self.split_datatype_f: typing.Optional[typing.TextIO] = None
self.split_datatype_wr = None
self.n_datatype_rows: int = 0
self.split_description_f: typing.Optional[typing.TextIO] = None
self.split_description_wr = None
self.n_description_rows: int = 0
self.split_en_description_f: typing.Optional[typing.TextIO] = None
self.split_en_description_wr = None
self.n_en_description_rows: int = 0
self.split_label_f: typing.Optional[typing.TextIO] = None
self.split_label_wr = None
self.n_label_rows: int = 0
self.split_en_label_f: typing.Optional[typing.TextIO] = None
self.split_en_label_wr = None
self.n_en_label_rows: int = 0
self.split_sitelink_f: typing.Optional[typing.TextIO] = None
self.split_sitelink_wr = None
self.n_sitelink_rows: int = 0
self.split_en_sitelink_f: typing.Optional[typing.TextIO] = None
self.split_en_sitelink_wr = None
self.n_en_sitelink_rows: int = 0
self.split_type_f: typing.Optional[typing.TextIO] = None
self.split_type_wr = None
self.n_type_rows: int = 0
self.split_property_edge_f: typing.Optional[typing.TextIO] = None
self.split_property_edge_wr = None
self.n_property_edge_rows: int = 0
self.split_property_qual_f: typing.Optional[typing.TextIO] = None
self.split_property_qual_wr = None
self.n_property_qual_rows: int = 0
self.process_split_files: bool = False
self.setup_split_dispatcher()
self.cnt: int = 0
self.started: bool = False
def run(self,
collector_q,
who: str):
print("The %s collector is starting (pid %d)." % (who, os.getpid()), file=sys.stderr, flush=True)
while True:
action, nrows, erows, qrows, header = collector_q.get()
# print("Collector action %s." % action, file=sys.stderr, flush=True)
if action == "rows":
self.collect(nrows, erows, qrows, who)
elif action == "node_header":
self.open_node_file(header, who)
elif action == "minimal_edge_header":
self.open_minimal_edge_file(header, who)
self.process_split_files = True
elif action == "detailed_edge_header":
self.open_detailed_edge_file(header, who)
elif action == "minimal_qual_header":
self.open_minimal_qual_file(header, who)
elif action == "detailed_qual_header":
self.open_detailed_qual_file(header, who)
elif action == "split_alias_header":
self.open_split_alias_file(header, who)
self.process_split_files = True
elif action == "split_en_alias_header":
self.open_split_en_alias_file(header, who)
self.process_split_files = True
elif action == "split_datatype_header":
self.open_split_datatype_file(header, who)
self.process_split_files = True
elif action == "split_description_header":
self.open_split_description_file(header, who)
self.process_split_files = True
elif action == "split_en_description_header":
self.open_split_en_description_file(header, who)
self.process_split_files = True
elif action == "split_label_header":
self.open_split_label_file(header, who)
self.process_split_files = True
elif action == "split_en_label_header":
self.open_split_en_label_file(header, who)
self.process_split_files = True
elif action == "split_sitelink_header":
self.open_split_sitelink_file(header, who)
self.process_split_files = True
elif action == "split_en_sitelink_header":
self.open_split_en_sitelink_file(header, who)
self.process_split_files = True
elif action == "split_type_header":
self.open_split_type_file(header, who)
self.process_split_files = True
elif action == "split_property_edge_header":
self.open_split_property_edge_file(header, who)
self.process_split_files = True
elif action == "split_property_qual_header":
self.open_split_property_qual_file(header, who)
elif action == "shutdown":
self.shutdown(who)
break
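# _open_file() returns a (file, writer) pair: a KgtkWriter (with a None file
# handle) when --use-kgtkwriter is set, or a plain tab-separated csv.writer
# otherwise.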
def _open_file(self, the_file: typing.Optional[str], header: typing.List[str], file_type: str, who: str):
if the_file is None or len(the_file) == 0:
raise ValueError("%s header without a %s file in the %s collector." % (file_type, file_type, who))
f: typing.Optional[typing.TextIO]
wr: typing.Any
if use_kgtkwriter:
from kgtk.io.kgtkwriter import KgtkWriter
print("Opening the %s file in the %s collector with KgtkWriter: %s" % (file_type, who, the_file), file=sys.stderr, flush=True)
wr = KgtkWriter.open(header, Path(the_file), who=who + " collector", use_mgzip=use_mgzip_for_output, mgzip_threads=mgzip_threads_for_output)
return None, wr
else:
print("Opening the %s file in the %s collector with csv.writer." % (file_type, who), file=sys.stderr, flush=True)
csv_line_terminator = "\n" if os.name == 'posix' else "\r\n"
f = open(the_file, "w", newline='')
wr = csv.writer(
f,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
wr.writerow(header)
return f, wr
def open_node_file(self, header: typing.List[str], who: str):
self.node_f, self.node_wr = self._open_file(node_file, header, "node", who)
def open_minimal_edge_file(self, header: typing.List[str], who: str):
self.minimal_edge_f, self.minimal_edge_wr = self._open_file(minimal_edge_file, header, "minimal edge", who)
def open_detailed_edge_file(self, header: typing.List[str], who: str):
self.detailed_edge_f, self.detailed_edge_wr = self._open_file(detailed_edge_file, header, "detailed edge", who)
def open_minimal_qual_file(self, header: typing.List[str], who: str):
self.minimal_qual_f, self.minimal_qual_wr = self._open_file(minimal_qual_file, header, "minimal qual", who)
def open_detailed_qual_file(self, header: typing.List[str], who: str):
self.detailed_qual_f, self.detailed_qual_wr = self._open_file(detailed_qual_file, header, "qual", who)
def open_split_alias_file(self, header: typing.List[str], who: str):
self.split_alias_f, self.split_alias_wr = self._open_file(split_alias_file, header, ALIAS_LABEL, who)
def open_split_en_alias_file(self, header: typing.List[str], who: str):
self.split_en_alias_f, self.split_en_alias_wr = self._open_file(split_en_alias_file, header, "English " + ALIAS_LABEL, who)
def open_split_datatype_file(self, header: typing.List[str], who: str):
self.split_datatype_f, self.split_datatype_wr = self._open_file(split_datatype_file, header, DATATYPE_LABEL, who)
def open_split_description_file(self, header: typing.List[str], who: str):
self.split_description_f, self.split_description_wr = self._open_file(split_description_file, header, DESCRIPTION_LABEL, who)
def open_split_en_description_file(self, header: typing.List[str], who: str):
self.split_en_description_f, self.split_en_description_wr = self._open_file(split_en_description_file, header, "English " + DESCRIPTION_LABEL, who)
def open_split_label_file(self, header: typing.List[str], who: str):
self.split_label_f, self.split_label_wr = self._open_file(split_label_file, header, LABEL_LABEL, who)
def open_split_en_label_file(self, header: typing.List[str], who: str):
self.split_en_label_f, self.split_en_label_wr = self._open_file(split_en_label_file, header, "English " + LABEL_LABEL, who)
def open_split_sitelink_file(self, header: typing.List[str], who: str):
self.split_sitelink_f, self.split_sitelink_wr = self._open_file(split_sitelink_file, header, SITELINK_LABEL, who)
def open_split_en_sitelink_file(self, header: typing.List[str], who: str):
self.split_en_sitelink_f, self.split_en_sitelink_wr = self._open_file(split_en_sitelink_file, header, "English " + SITELINK_LABEL, who)
def open_split_type_file(self, header: typing.List[str], who: str):
self.split_type_f, self.split_type_wr = self._open_file(split_type_file, header, TYPE_LABEL, who)
def open_split_property_edge_file(self, header: typing.List[str], who: str):
self.split_property_edge_f, self.split_property_edge_wr = self._open_file(split_property_edge_file, header, "property edge", who)
def open_split_property_qual_file(self, header: typing.List[str], who: str):
self.split_property_qual_f, self.split_property_qual_wr = self._open_file(split_property_qual_file, header, "property qual", who)
def shutdown(self, who: str):
print("Exiting the %s collector (pid %d)." % (who, os.getpid()), file=sys.stderr, flush=True)
if use_kgtkwriter:
if self.node_wr is not None:
self.node_wr.close()
if self.minimal_edge_wr is not None:
self.minimal_edge_wr.close()
if self.detailed_edge_wr is not None:
self.detailed_edge_wr.close()
if self.minimal_qual_wr is not None:
self.minimal_qual_wr.close()
if self.detailed_qual_wr is not None:
self.detailed_qual_wr.close()
if self.split_alias_wr is not None:
self.split_alias_wr.close()
if self.split_en_alias_wr is not None:
self.split_en_alias_wr.close()
if self.split_datatype_wr is not None:
self.split_datatype_wr.close()
if self.split_description_wr is not None:
self.split_description_wr.close()
if self.split_en_description_wr is not None:
self.split_en_description_wr.close()
if self.split_label_wr is not None:
self.split_label_wr.close()
if self.split_en_label_wr is not None:
self.split_en_label_wr.close()
if self.split_sitelink_wr is not None:
self.split_sitelink_wr.close()
if self.split_en_sitelink_wr is not None:
self.split_en_sitelink_wr.close()
if self.split_type_wr is not None:
self.split_type_wr.close()
if self.split_property_edge_wr is not None:
self.split_property_edge_wr.close()
if self.split_property_qual_wr is not None:
self.split_property_qual_wr.close()
else:
if self.node_f is not None:
self.node_f.close()
if self.minimal_edge_f is not None:
self.minimal_edge_f.close()
if self.detailed_edge_f is not None:
self.detailed_edge_f.close()
if self.minimal_qual_f is not None:
self.minimal_qual_f.close()
if self.detailed_qual_f is not None:
self.detailed_qual_f.close()
if self.split_alias_f is not None:
self.split_alias_f.close()
if self.split_en_alias_f is not None:
self.split_en_alias_f.close()
if self.split_datatype_f is not None:
self.split_datatype_f.close()
if self.split_description_f is not None:
self.split_description_f.close()
if self.split_en_description_f is not None:
self.split_en_description_f.close()
if self.split_label_f is not None:
self.split_label_f.close()
if self.split_en_label_f is not None:
self.split_en_label_f.close()
if self.split_sitelink_f is not None:
self.split_sitelink_f.close()
if self.split_en_sitelink_f is not None:
self.split_en_sitelink_f.close()
if self.split_type_f is not None:
self.split_type_f.close()
if self.split_property_edge_f is not None:
self.split_property_edge_f.close()
if self.split_property_qual_f is not None:
self.split_property_qual_f.close()
print("The %s collector has closed its output files." % who, file=sys.stderr, flush=True)
def collect(self,
nrows: typing.List[typing.List[str]],
erows: typing.List[typing.List[str]],
qrows: typing.List[typing.List[str]],
who: str):
self.nrows += len(nrows)
self.erows += len(erows)
self.qrows += len(qrows)
self.cnt += 1
if progress_interval > 0 and self.cnt % progress_interval == 0:
print("The {} collector called {} times: {} nrows, {} erows, {} qrows".format(who,
self.cnt,
self.nrows,
self.erows,
self.qrows), file=sys.stderr, flush=True)
row: typing.List[str]
if len(nrows) > 0:
if self.node_wr is None:
raise ValueError("Unexpected node rows in the %s collector." % who)
if use_kgtkwriter:
for row in nrows:
self.node_wr.write(row)
else:
self.node_wr.writerows(nrows)
if len(erows) > 0:
if use_kgtkwriter:
if not self.process_split_files:
if self.detailed_edge_wr is None:
raise ValueError("Unexpected edge rows in the %s collector." % who)
for row in erows:
self.detailed_edge_wr.write(row)
else:
for row in erows:
split: bool = False
label: str = row[2] # Hack: knows the structure of the row.
method: typing.Optional[typing.Callable[[typing.List[str]], bool]] = self.split_dispatcher.get(label)
if method is not None:
split = method(row)
if not split:
if self.minimal_edge_wr is None and self.detailed_edge_wr is None and self.split_property_edge_wr is None:
raise ValueError("Unexpected %s edge rows in the %s collector." % (label, who))
if self.split_property_edge_wr is not None and row[1].startswith("P"): # Hack: knows the structure of the row.
# For now, split property files are minimal.
self.split_property_edge_wr.write((row[0], row[1], row[2], row[3], row[4], row[5])) # Hack: knows the structure of the row.
elif self.minimal_edge_wr is not None:
self.minimal_edge_wr.write((row[0], row[1], row[2], row[3], row[4], row[5])) # Hack: knows the structure of the row.
if self.detailed_edge_wr is not None:
self.detailed_edge_wr.write(row)
else:
if self.minimal_edge_wr is None:
raise ValueError("Unexpected edge rows in the %s collector." % who)
self.minimal_edge_wr.writerows(erows)
if len(qrows) > 0:
if use_kgtkwriter:
if self.minimal_qual_wr is None and self.detailed_qual_wr is None:
raise ValueError("Unexpected qual rows in the %s collector." % who)
for row in qrows:
if self.split_property_qual_wr is not None and row[0].startswith("P"): # Hack: knows the structure of the row.
self.split_property_qual_wr.write((row[0], row[1], row[2], row[3], row[4])) # Hack: knows the structure of the row.
elif self.minimal_qual_wr is not None:
self.minimal_qual_wr.write((row[0], row[1], row[2], row[3], row[4])) # Hack: knows the structure of the row.
if self.detailed_qual_wr is not None:
self.detailed_qual_wr.write(row)
else:
if self.detailed_qual_wr is None:
raise ValueError("Unexpected qual rows in the %s collector." % who)
self.detailed_qual_wr.writerows(qrows)
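# The split dispatcher maps an edge label (alias, description, label, sitelink,
# datatype, type, ...) to the method that writes it to the matching --split-*
# output file; collect() falls back to the regular edge files for any row that
# no split file claims.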
def setup_split_dispatcher(self):
self.split_dispatcher: typing.MutableMapping[str, typing.Callable[[typing.List[str]], bool]] = dict()
self.split_dispatcher[ADDL_SITELINK_LABEL] = self.split_sitelink
self.split_dispatcher[ALIAS_LABEL] = self.split_alias
self.split_dispatcher[DATATYPE_LABEL] = self.split_datatype
self.split_dispatcher[DESCRIPTION_LABEL] = self.split_description
self.split_dispatcher[LABEL_LABEL] = self.split_label
self.split_dispatcher[SITELINK_LABEL] = self.split_sitelink
self.split_dispatcher[SITELINK_BADGE_LABEL] = self.split_sitelink
self.split_dispatcher[SITELINK_LANGUAGE_LABEL] = self.split_sitelink
self.split_dispatcher[SITELINK_SITE_LABEL] = self.split_sitelink
self.split_dispatcher[SITELINK_TITLE_LABEL] = self.split_sitelink
self.split_dispatcher[TYPE_LABEL] = self.split_type
def split_alias(self, row: typing.List[str])->bool:
split: bool = False
lang: str = row[-1] # Hack: knows the structure of the row.
if self.split_alias_wr is not None:
self.split_alias_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
split= True
if self.split_en_alias_wr is not None and lang == "en":
self.split_en_alias_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
split = True
return split
def split_datatype(self, row: typing.List[str])->bool:
split: bool = False
if self.split_datatype_wr is not None:
self.split_datatype_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
split = True
return split
def split_description(self, row: typing.List[str])->bool:
split: bool = False
lang: str = row[-1] # Hack: knows the structure of the row.
if self.split_description_wr is not None:
self.split_description_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
split = True
if self.split_en_description_wr is not None and lang == "en":
self.split_en_description_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
split = True
return split
def split_label(self, row: typing.List[str])->bool:
split: bool = False
lang: str = row[-1] # Hack: knows the structure of the row.
if self.split_label_wr is not None:
self.split_label_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
split = True
if self.split_en_label_wr is not None and lang == "en":
self.split_en_label_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
split = True
return split
def split_sitelink(self, row: typing.List[str])->bool:
split: bool = False
lang: str = row[-1] # Hack: knows the structure of the row.
if self.split_sitelink_wr is not None:
self.split_sitelink_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
split = True
if self.split_en_sitelink_wr is not None and lang == "en":
self.split_en_sitelink_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
split = True
return split
def split_type(self, row: typing.List[str])->bool:
split: bool = False
if self.split_type_wr is not None:
self.split_type_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
split = True
return split
try:
UPDATE_VERSION: str = "2020-12-08T23:35:07.113207+00:00#g4xo5tTabYAJX0cxMKB6wjezb1k3fGAPtNPYELzeAmrESNU2wiKR2wQVS4cBMsjz9KGTL0J0Mmp0pE+iLSTYOQ=="
print("kgtk import-wikidata version: %s" % UPDATE_VERSION, file=sys.stderr, flush=True)
print("Starting main process (pid %d)." % os.getpid(), file=sys.stderr, flush=True)
inp_path = KGTKArgumentParser.get_input_file(input_file)
csv_line_terminator = "\n" if os.name == 'posix' else "\r\n"
start=time.time()
if not skip_processing:
from gzip import GzipFile
print("Processing.", file=sys.stderr, flush=True)
# Open the input file first to make it easier to monitor with "pv".
input_f: typing.Union[GzipFile, typing.IO[typing.Any]]
if str(inp_path) == "-":
print('Processing wikidata from standard input', file=sys.stderr, flush=True)
# It is not well documented, but this is how you read binary data
# from stdin in Python 3.
#
# TODO: Add decompression.
input_f = sys.stdin.buffer
else:
print('Processing wikidata file %s' % str(inp_path), file=sys.stderr, flush=True)
input_f = open(inp_path, mode='rb')
progress_startup(fd=input_f.fileno()) # Start the custom progress monitor.
if str(inp_path).endswith(".bz2"):
print('Decompressing (bz2)', file=sys.stderr, flush=True)
# TODO: Optionally use a system decompression program.
input_f = bz2.open(input_f)
elif str(inp_path).endswith(".gz"):
# TODO: Optionally use a system decompression program.
if use_mgzip_for_input:
import mgzip
print('Decompressing (mgzip)', file=sys.stderr, flush=True)
input_f = mgzip.open(input_f, thread=mgzip_threads_for_input)
else:
import gzip
print('Decompressing (gzip)', file=sys.stderr, flush=True)
input_f = gzip.open(input_f)
            collector_p = None
            node_collector_p = None
            edge_collector_p = None
            qual_collector_p = None
            description_collector_p = None
            sitelink_collector_p = None
            # Default the queues as well, so the "is not None" checks during
            # shutdown can't raise NameError for a queue that was never created.
            collector_q = node_collector_q = edge_collector_q = None
            qual_collector_q = description_collector_q = sitelink_collector_q = None
if collect_results:
print("Creating the collector queue.", file=sys.stderr, flush=True)
# collector_q = pyrallel.ShmQueue()
collector_q_maxsize = procs*collector_queue_per_proc_size
if collect_seperately:
if node_file is not None:
node_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector node queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the node_collector.", file=sys.stderr, flush=True)
node_collector: MyCollector = MyCollector()
print("Creating the node collector process.", file=sys.stderr, flush=True)
node_collector_p = mp.Process(target=node_collector.run, args=(node_collector_q, "node"))
print("Starting the node collector process.", file=sys.stderr, flush=True)
node_collector_p.start()
print("Started the node collector process.", file=sys.stderr, flush=True)
if minimal_edge_file is not None or detailed_edge_file is not None:
edge_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector edge queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the edge_collector.", file=sys.stderr, flush=True)
edge_collector: MyCollector = MyCollector()
print("Creating the edge collector process.", file=sys.stderr, flush=True)
edge_collector_p = mp.Process(target=edge_collector.run, args=(edge_collector_q, "edge"))
print("Starting the edge collector process.", file=sys.stderr, flush=True)
edge_collector_p.start()
print("Started the edge collector process.", file=sys.stderr, flush=True)
if minimal_qual_file is not None or detailed_qual_file is not None:
qual_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector qual queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the qual_collector.", file=sys.stderr, flush=True)
qual_collector: MyCollector = MyCollector()
print("Creating the qual collector process.", file=sys.stderr, flush=True)
qual_collector_p = mp.Process(target=qual_collector.run, args=(qual_collector_q, "qual"))
print("Starting the qual collector process.", file=sys.stderr, flush=True)
qual_collector_p.start()
print("Started the qual collector process.", file=sys.stderr, flush=True)
if split_description_file is not None:
description_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector description queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the description collector.", file=sys.stderr, flush=True)
description_collector: MyCollector = MyCollector()
print("Creating the description collector process.", file=sys.stderr, flush=True)
description_collector_p = mp.Process(target=description_collector.run, args=(description_collector_q, "description"))
print("Starting the description collector process.", file=sys.stderr, flush=True)
description_collector_p.start()
print("Started the description collector process.", file=sys.stderr, flush=True)
if split_sitelink_file is not None:
sitelink_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector sitelink queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the sitelink collector.", file=sys.stderr, flush=True)
sitelink_collector: MyCollector = MyCollector()
print("Creating the sitelink collector process.", file=sys.stderr, flush=True)
sitelink_collector_p = mp.Process(target=sitelink_collector.run, args=(sitelink_collector_q, "sitelink"))
print("Starting the sitelink collector process.", file=sys.stderr, flush=True)
sitelink_collector_p.start()
print("Started the sitelink collector process.", file=sys.stderr, flush=True)
else:
collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The common collector queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the common collector.", file=sys.stderr, flush=True)
collector: MyCollector = MyCollector()
print("Creating the common collector process.", file=sys.stderr, flush=True)
collector_p = mp.Process(target=collector.run, args=(collector_q, "common"))
print("Starting the common collector process.", file=sys.stderr, flush=True)
collector_p.start()
print("Started the common collector process.", file=sys.stderr, flush=True)
if node_file:
if node_id_only:
node_file_header = ['id']
else:
node_file_header = ['id','label','type','description','alias','datatype']
ncq = collector_q if collector_q is not None else node_collector_q
if ncq is not None:
print("Sending the node header to the collector.", file=sys.stderr, flush=True)
ncq.put(("node_header", None, None, None, node_file_header))
print("Sent the node header to the collector.", file=sys.stderr, flush=True)
else:
with open(node_file+'_header', 'w', newline='') as myfile:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
wr.writerow(node_file_header)
if explode_values:
edge_file_header = ['id','node1','label','node2','rank','node2;magnitude','node2;unit','node2;date','node2;item','node2;lower','node2;upper',
'node2;latitude','node2;longitude','node2;precision','node2;calendar','node2;entity-type','node2;wikidatatype', 'lang']
else:
edge_file_header = ['id','node1','label','node2',
'rank', 'node2;wikidatatype',
'claim_id', 'val_type', 'entity_type', 'datahash', 'precision', 'calendar', 'lang']
ecq = collector_q if collector_q is not None else edge_collector_q
if detailed_edge_file:
if ecq is not None:
print("Sending the detailed edge header to the collector.", file=sys.stderr, flush=True)
ecq.put(("detailed_edge_header", None, None, None, edge_file_header))
print("Sent the detailed edge header to the collector.", file=sys.stderr, flush=True)
else:
with open(detailed_edge_file+'_header', 'w', newline='') as myfile:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
wr.writerow(edge_file_header)
if minimal_edge_file and ecq is not None:
print("Sending the minimal edge file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("minimal_edge_header", None, None, None, edge_file_header[0:6]))
print("Sent the minimal edge file header to the collector.", file=sys.stderr, flush=True)
if split_alias_file and ecq is not None:
alias_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the alias file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_alias_header", None, None, None, alias_file_header))
print("Sent the alias file header to the collector.", file=sys.stderr, flush=True)
if split_en_alias_file and ecq is not None:
en_alias_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English alias file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_en_alias_header", None, None, None, en_alias_file_header))
print("Sent the English alias file header to the collector.", file=sys.stderr, flush=True)
if split_datatype_file and ecq is not None:
datatype_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the datatype file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_datatype_header", None, None, None, datatype_file_header))
print("Sent the datatype file header to the collector.", file=sys.stderr, flush=True)
dcq = collector_q if collector_q is not None else description_collector_q
if split_description_file and dcq is not None:
description_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the description file header to the collector.", file=sys.stderr, flush=True)
dcq.put(("split_description_header", None, None, None, description_file_header))
print("Sent the description file header to the collector.", file=sys.stderr, flush=True)
if split_en_description_file and dcq is not None:
en_description_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English description file header to the collector.", file=sys.stderr, flush=True)
dcq.put(("split_en_description_header", None, None, None, en_description_file_header))
print("Sent the English description file header to the collector.", file=sys.stderr, flush=True)
if split_label_file and ecq is not None:
label_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the label file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_label_header", None, None, None, label_file_header))
print("Sent the label file header to the collector.", file=sys.stderr, flush=True)
if split_en_label_file and ecq is not None:
en_label_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English label file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_en_label_header", None, None, None, en_label_file_header))
print("Sent the English label file header to the collector.", file=sys.stderr, flush=True)
scq = collector_q if collector_q is not None else sitelink_collector_q
if split_sitelink_file and scq is not None:
sitelink_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the sitelink file header to the collector.", file=sys.stderr, flush=True)
scq.put(("split_sitelink_header", None, None, None, sitelink_file_header))
print("Sent the sitelink file header to the collector.", file=sys.stderr, flush=True)
if split_en_sitelink_file and scq is not None:
en_sitelink_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English sitelink file header to the collector.", file=sys.stderr, flush=True)
scq.put(("split_en_sitelink_header", None, None, None, en_sitelink_file_header))
print("Sent the English sitelink file header to the collector.", file=sys.stderr, flush=True)
if split_type_file and ecq is not None:
type_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the entry type file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_type_header", None, None, None, type_file_header))
print("Sent the entry type file header to the collector.", file=sys.stderr, flush=True)
if split_property_edge_file and ecq is not None:
print("Sending the property edge file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_property_edge_header", None, None, None, edge_file_header[0:6]))
print("Sent the property edge file header to the collector.", file=sys.stderr, flush=True)
if minimal_qual_file is not None or detailed_qual_file is not None or split_property_qual_file is not None:
qual_file_header = edge_file_header.copy()
if "rank" in qual_file_header:
qual_file_header.remove('rank')
if "claim_type" in qual_file_header:
qual_file_header.remove('claim_type')
if "claim_id" in qual_file_header:
qual_file_header.remove('claim_id')
if "lang" in qual_file_header:
qual_file_header.remove('lang')
qcq = collector_q if collector_q is not None else qual_collector_q
if detailed_qual_file is not None:
if qcq is not None:
print("Sending the detailed qual file header to the collector.", file=sys.stderr, flush=True)
qcq.put(("detailed_qual_header", None, None, None, qual_file_header))
print("Sent the detailed qual file header to the collector.", file=sys.stderr, flush=True)
else:
with open(detailed_qual_file+'_header', 'w', newline='') as myfile:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
wr.writerow(qual_file_header)
if minimal_qual_file is not None and qcq is not None:
print("Sending the minimal qual file header to the collector.", file=sys.stderr, flush=True)
qcq.put(("minimal_qual_header", None, None, None, qual_file_header[0:5]))
print("Sent the minimal qual file header to the collector.", file=sys.stderr, flush=True)
if split_property_qual_file and qcq is not None:
print("Sending the property qual file header to the collector.", file=sys.stderr, flush=True)
qcq.put(("split_property_qual_header", None, None, None, qual_file_header[0:5]))
print("Sent the property qual file header to the collector.", file=sys.stderr, flush=True)
print('Creating parallel processor for {}'.format(str(inp_path)), file=sys.stderr, flush=True)
if use_shm or single_mapper_queue:
pp = pyrallel.ParallelProcessor(procs, MyMapper,enable_process_id=True, max_size_per_mapper_queue=max_size_per_mapper_queue,
use_shm=use_shm, enable_collector_queues=False, batch_size=mapper_batch_size,
single_mapper_queue=single_mapper_queue)
else:
pp = pyrallel.ParallelProcessor(procs, MyMapper,enable_process_id=True, max_size_per_mapper_queue=max_size_per_mapper_queue,
batch_size=mapper_batch_size)
print('Start parallel processing', file=sys.stderr, flush=True)
pp.start()
for cnt, line in enumerate(input_f):
if limit and cnt >= limit:
break
# pp.add_task(line,node_file,edge_file,qual_file,languages,source)
pp.add_task(line)
print('Done processing {}'.format(str(inp_path)), file=sys.stderr, flush=True)
input_f.close()
print('Telling the workers to shut down.', file=sys.stderr, flush=True)
pp.task_done()
print('Waiting for the workers to shut down.', file=sys.stderr, flush=True)
pp.join()
print('Worker shut down is complete.', file=sys.stderr, flush=True)
if collector_q is not None:
print('Telling the collector to shut down.', file=sys.stderr, flush=True)
collector_q.put(("shutdown", None, None, None, None))
if collector_p is not None:
print('Waiting for the collector to shut down.', file=sys.stderr, flush=True)
collector_p.join()
print('Collector shut down is complete.', file=sys.stderr, flush=True)
if collector_q is not None:
collector_q.close()
if node_collector_q is not None:
print('Telling the node collector to shut down.', file=sys.stderr, flush=True)
node_collector_q.put(("shutdown", None, None, None, None))
if node_collector_p is not None:
print('Waiting for the node collector to shut down.', file=sys.stderr, flush=True)
node_collector_p.join()
print('Node collector shut down is complete.', file=sys.stderr, flush=True)
if node_collector_q is not None:
node_collector_q.close()
if edge_collector_q is not None:
print('Telling the edge collector to shut down.', file=sys.stderr, flush=True)
edge_collector_q.put(("shutdown", None, None, None, None))
if edge_collector_p is not None:
print('Waiting for the edge collector to shut down.', file=sys.stderr, flush=True)
edge_collector_p.join()
print('Edge collector shut down is complete.', file=sys.stderr, flush=True)
if edge_collector_q is not None:
edge_collector_q.close()
if qual_collector_q is not None:
print('Telling the qual collector to shut down.', file=sys.stderr, flush=True)
qual_collector_q.put(("shutdown", None, None, None, None))
if qual_collector_p is not None:
print('Waiting for the qual collector to shut down.', file=sys.stderr, flush=True)
qual_collector_p.join()
print('Qual collector shut down is complete.', file=sys.stderr, flush=True)
if qual_collector_q is not None:
qual_collector_q.close()
if description_collector_q is not None:
print('Telling the description collector to shut down.', file=sys.stderr, flush=True)
description_collector_q.put(("shutdown", None, None, None, None))
if description_collector_p is not None:
print('Waiting for the description collector to shut down.', file=sys.stderr, flush=True)
description_collector_p.join()
print('Description collector shut down is complete.', file=sys.stderr, flush=True)
if description_collector_q is not None:
description_collector_q.close()
if sitelink_collector_q is not None:
print('Telling the sitelink collector to shut down.', file=sys.stderr, flush=True)
sitelink_collector_q.put(("shutdown", None, None, None, None))
if sitelink_collector_p is not None:
print('Waiting for the sitelink collector to shut down.', file=sys.stderr, flush=True)
sitelink_collector_p.join()
print('Sitelink collector shut down is complete.', file=sys.stderr, flush=True)
if sitelink_collector_q is not None:
sitelink_collector_q.close()
if not skip_merging and not collect_results:
# We've finished processing the input data, possibly using multiple
# server processes. We need to assemble the final output file(s) with
# the header first, then the fragments produced by parallel
# processing.
#
# If we assume that we are on Linux, then os.sendfile(...)
# should provide the simplest, highest-performing solution.
if node_file:
print('Combining the node file fragments', file=sys.stderr, flush=True)
node_file_fragments=[node_file+'_header']
for n in range(procs):
node_file_fragments.append(node_file+'_'+str(n))
platform_cat(node_file_fragments, node_file, remove=not keep_temp_files, use_python_cat=use_python_cat, verbose=True)
if detailed_edge_file:
print('Combining the edge file fragments', file=sys.stderr, flush=True)
edge_file_fragments=[detailed_edge_file+'_header']
for n in range(procs):
edge_file_fragments.append(detailed_edge_file+'_'+str(n))
platform_cat(edge_file_fragments, detailed_edge_file, remove=not keep_temp_files, use_python_cat=use_python_cat, verbose=True)
if detailed_qual_file:
print('Combining the qualifier file fragments', file=sys.stderr, flush=True)
qual_file_fragments=[detailed_qual_file+'_header']
for n in range(procs):
qual_file_fragments.append(detailed_qual_file+'_'+str(n))
platform_cat(qual_file_fragments, detailed_qual_file, remove=not keep_temp_files, use_python_cat=use_python_cat, verbose=True)
print('import complete', file=sys.stderr, flush=True)
end=time.time()
print('time taken : {}s'.format(end-start), file=sys.stderr, flush=True)
except Exception as e:
raise KGTKException(str(e))
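# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module).  The collector queues
# above exchange 5-tuples of the form (action, nrows, erows, qrows, header),
# e.g. ("node_header", None, None, None, [...]) or ("shutdown", None, None,
# None, None).  The toy consumer below shows that convention with a plain
# queue.Queue standing in for pyrallel.ShmQueue; the "rows" action and the
# sample values are assumptions made for demonstration only.
if __name__ == "__main__":
    import queue

    def demo_collector(q):
        """Drain (action, nrows, erows, qrows, header) tuples until 'shutdown'."""
        while True:
            action, nrows, erows, qrows, header = q.get()
            if action == "shutdown":
                print("collector: shutting down")
                break
            elif action.endswith("_header"):
                print("collector: %s = %s" % (action, header))
            else:
                print("collector: %d node / %d edge / %d qual rows"
                      % (len(nrows or []), len(erows or []), len(qrows or [])))

    demo_q = queue.Queue()
    demo_q.put(("node_header", None, None, None,
                ["id", "label", "type", "description", "alias", "datatype"]))
    demo_q.put(("rows", [["Q42", "'Douglas Adams'@en", "item", "", "", ""]], [], [], None))
    demo_q.put(("shutdown", None, None, None, None))
    demo_collector(demo_q)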
|
test_orca.py
|
#!/usr/bin/env python
import rvo2
import matplotlib.pyplot as plt
import numpy as np
import random
import rospy
import time
import threading
import math
from utils import *
from nav_msgs.msg import Odometry
import A1easyGo as easyGo
import sys
sys.path.append("..")
import A1scan2obs as pc2obs
rospy.init_node('A1_mvs', anonymous=False)
SIMUL_HZ = 10.0
sim = rvo2.PyRVOSimulator(1/SIMUL_HZ, 15.0, 10, 5.0, 2.0, 0.15, 3.0)
COL = 10.0
ROW = 10.0
voxel_size = 0.5
size = voxel_size/2
#ROBOT MOVE
SPEED = 20 # 14
ROTATE_SPEED = 15 # 25
def GoEasy(direc):
if direc == 4: # Backward
easyGo.mvStraight(- SPEED, -1)
elif direc == 0 or direc == 1: # Go straight
easyGo.mvStraight(SPEED, -1)
elif direc == 2: # turn left
easyGo.mvRotate(ROTATE_SPEED, -1, False)
elif direc == 3: # turn right
easyGo.mvRotate(ROTATE_SPEED, -1, True)
elif direc == 5: # stop
easyGo.stop()
#MIN_OBS_SIZE = 0.6 / 2
#MAX_OBS_SIZE = 1.4 / 2
# make random square object
'''
obs_center_size = [(random.uniform(-COL, COL), random.uniform(0, ROW), random.uniform(MIN_OBS_SIZE, MAX_OBS_SIZE)) for i in range(15)]
# osb_position must be convex in counter clock wise order
obs_position_list = [[(x-size, y-size),(x+size, y-size), (x+size, y+size), (x-size, y+size)] for x,y,size in obs_center_size]
obs = [sim.addObstacle(obs_position) for obs_position in obs_position_list]
'''
# single obstacle for test
# obs_position_list = [[(6.1,6.1), (4.1, 6.1), (4.1, 4.1)]]
#o1 = sim.addObstacle([(6.1,6.1), (4.1, 6.1), (4.1, 4.1)])
# obs_position_list = np.array(obs_position_list)
global obs_pos, self_pos, self_yaw
obs_pos = [[0, 0], [0, 0]]
self_pos = [0, 0]
self_yaw = 0.0
def ob1_callback(data):
global self_pos
global obs_pos
global self_yaw
_x = data.pose.pose.position.x
_y = data.pose.pose.position.y
relative_x = _x - self_pos[1]
relative_y = _y - self_pos[0]
x2 = math.cos(1.57-self_yaw) * relative_x - math.sin(1.57-self_yaw) * relative_y
y2 = math.sin(1.57-self_yaw) * relative_x + math.cos(1.57-self_yaw) * relative_y
obs_pos[0] = [x2, y2]
def ob2_callback(data):
global self_pos
global obs_pos
global self_yaw
_x = data.pose.pose.position.x
_y = data.pose.pose.position.y
relative_x = _x - self_pos[1]
relative_y = _y - self_pos[0]
x2 = math.cos(1.57-self_yaw) * relative_x - math.sin(1.57-self_yaw) * relative_y
y2 = math.sin(1.57-self_yaw) * relative_x + math.cos(1.57-self_yaw) * relative_y
obs_pos[1] = [x2, y2]
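# Illustrative helper (added for clarity, not used by this script): both
# callbacks above apply the same 2-D rotation to express an obstacle's
# world-frame offset in the robot frame; the 1.57 they use approximates pi/2.
def world_to_robot_frame(dx, dy, yaw):
    """Rotate the relative offset (dx, dy) by (pi/2 - yaw) into the robot frame."""
    ang = math.pi / 2.0 - yaw
    x2 = math.cos(ang) * dx - math.sin(ang) * dy
    y2 = math.sin(ang) * dx + math.cos(ang) * dy
    return [x2, y2]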
def self_callback(data):
global self_pos, self_yaw
_x = data.pose.pose.position.x
_y = data.pose.pose.position.y
ox = data.pose.pose.orientation.x
oy = data.pose.pose.orientation.y
oz = data.pose.pose.orientation.z
ow = data.pose.pose.orientation.w
self_yaw = qut2eu(ox, oy, oz, ow)
self_pos = [_y, _x]
def listener():
print('listener ready')
rospy.Subscriber("/tb3_0/odom", Odometry, self_callback)
rospy.Subscriber("/tb3_1/odom", Odometry, ob1_callback)
rospy.Subscriber("/tb3_2/odom", Odometry, ob2_callback)
rospy.spin()
def orca():
global obs_pos
global self_pos
global self_yaw
sim.processObstacles()
agents_position =[(0,0)]
agents = [sim.addAgent(position, 15.0, 10, 5.0, 2.0, 0.15, 3.0, (0.0,3.0)) for position in agents_position]
agents_velocity = [(0.0, 0.5)]
for agent, velocity in zip(agents, agents_velocity):
sim.setAgentPrefVelocity(agent, velocity)
print('Simulation has %i agents and %i obstacle vertices in it.' %
(sim.getNumAgents(), sim.getNumObstacleVertices()))
print("init pc2obs")
pc2obs.pc2obs_init()
print('Running simulation')
pc2obs_time = 0.0
lpp_time = 0.0
for step in range(100):
t1 = time.time()
#samples = pc2obs.pc2obs(voxel_size = voxel_size)
samples = np.array(obs_pos)
#print(samples)
t2 = time.time()
# print(samples)
        if isinstance(samples, bool):
continue
sim.clearObstacle()
obs_position_list = [[(x-size, y-size),(x+size, y-size), (x+size, y+size), (x-size, y+size)] for x,y in samples]
obs = [sim.addObstacle(obs_position) for obs_position in obs_position_list]
sim.processObstacles()
#for agent, velocity in zip(agents, agents_velocity):
# sim.setAgentPrefVelocity(agent, velocity)
# always set agent position as origin
sim.setAgentPosition(0, (0,0))
positions = [sim.getAgentPosition(agent) for agent in agents]
sim.doStep()
#positions = [sim.getAgentPosition(agent) for agent in agents]
#print('step=%2i t=%.3f %s' % (step, sim.getGlobalTime(), ' '.join(positions)))
positions = np.array(positions)
obs_position_list = np.array(obs_position_list)
velocity = [sim.getAgentVelocity(agent) for agent in agents]
# print("Agent velocity: {}, {}".format(velocity[0][0], velocity[0][1]))
if velocity[0][0] < 0:
direc = 2 # turn left
elif velocity[0][0] > 0:
direc = 3 # turn right
elif velocity[0][1] > 0:
direc = 1 # go straight
elif velocity[0][1] < 0:
direc = 4 # backward
else:
direc = 5 # stop
GoEasy(direc)
t3 = time.time()
pc2obs_time += t2-t1
lpp_time += t3-t2
# print("pc2obs took: {} sec".format(t2-t1))
# print("OCRA took: {} sec".format(t3-t2))
# print("Average took: {} sec, {} sec".format(pc2obs_time/(step+1), lpp_time/(step+1)))
# plt.arrow(positions[0][0], positions[0][1], velocity[0][0], velocity[0][1], width=0.05)
# for obs_position in obs_position_list:
# plt.plot(np.hstack([obs_position[:,0],obs_position[0][0]]), np.hstack([obs_position[:,1],obs_position[0][1]]))
# plt.scatter(positions[:,0], positions[:,1], label='agents')
# if len(samples) != 0:
# plt.scatter(samples[:,0], samples[:,1], label='samples')
# plt.legend()
        # plt.title("Trajectories of the agents")
# plt.xlabel("x (m)")
# plt.ylabel("y (m)")
# plt.xlim(-5,5)
# plt.ylim(-2,8)
# plt.pause(0.001)
# plt.cla()
# print("{:.6f} sec simulated".format(step/SIMUL_HZ))
time.sleep(0.1)
easyGo.stop()
rospy.signal_shutdown("esc")
sys.exit(1)
def load_orca():
print("orca ready")
orca_thread = threading.Thread(target=orca)
orca_thread.start()
if __name__ == "__main__":
try:
load_orca()
listener()
except KeyboardInterrupt:
print("Interrupted by key")
|
__init__.py
|
import linecache
from threading import Thread
import sys
from logging import Handler, Formatter
from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
import time
try:
from Queue import Queue
except:
from queue import Queue
import requests
import json
_levelToName = {
CRITICAL: 'FATAL',
ERROR: 'ERROR',
WARNING: 'WARN',
INFO: 'INFO',
DEBUG: 'DEBUG',
NOTSET: 'TRACE',
}
class LoggerJsonFormatter(Formatter):
"""
Format record in LoggerJson format
"""
def format(self, record):
"""Formats LogRecord into python dictionary."""
# Standard document
document = {
'timestamp': time.time() * 1000.0,
'level': _levelToName[record.levelno],
'thread': record.threadName,
'thread_id': record.thread,
'message': record.getMessage(),
'logger': record.name,
'location': {
'filename': record.pathname,
'class': record.module,
'method': record.funcName,
'line': record.lineno
}
}
# Standard document decorated with exception info
if record.exc_info is not None:
document.update({
'throwable': {
'message': str(record.exc_info[1]),
'stack_trace': [
{
"line": stack[1],
"filename": stack[0],
"method": stack[2],
"line_code": stack[3]
}
for stack in LoggerJsonFormatter.extract_tb(record.exc_info[2])
]
}
})
return document
@staticmethod
def extract_tb(tb, limit=None):
"""Return list of up to limit pre-processed entries from traceback.
This is useful for alternate formatting of stack traces. If
'limit' is omitted or None, all entries are extracted. A
pre-processed stack trace entry is a quadruple (filename, line
number, function name, text) representing the information that is
usually printed for a stack trace. The text is a string with
leading and trailing whitespace stripped; if the source is not
available it is None.
"""
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
        tb_entries = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
            tb_entries.append((filename, lineno, name, line))
tb = tb.tb_next
n = n + 1
        return tb_entries
class LofkaHandler(Handler):
"""
    Log handler that sends each record to the Lofka server synchronously.
"""
def __init__(self, target_url, app_name="default_python_application"):
super(LofkaHandler, self).__init__()
try:
with open("lofka.json", "r") as fp:
obj = json.load(fp)
target_url = obj['target_url']
app_name = obj['app_name']
        except Exception:
            # Fall back to the constructor arguments if lofka.json is missing or malformed.
            pass
self.target_url = target_url + "lofka/service/push"
self.app_name = app_name
self.formatter = LoggerJsonFormatter()
def emit(self, record):
"""
Commit record to server
:param record:
:return:
"""
record_object = self.formatter.format(record)
record_object["app_name"] = self.app_name
requests.post(self.target_url, data=json.dumps(record_object))
class LofkaAsyncHandler(Handler):
"""
    Log handler that queues records and ships them to the Lofka server in batches.
"""
def __init__(self,
target_url,
app_name="default_python_application",
interval=1000,
max_buffer_size=1000
):
super(LofkaAsyncHandler, self).__init__()
try:
with open("lofka.json", "r") as fp:
obj = json.load(fp)
target_url = obj['target_url']
app_name = obj['app_name']
interval = int(obj['interval'])
max_buffer_size = int(obj['max_buffer_size'])
        except Exception:
            # Fall back to the constructor arguments if lofka.json is missing or malformed.
            pass
self.target_url = target_url + "lofka/service/push/batch"
self.app_name = app_name
self.formatter = LoggerJsonFormatter()
self.message_queue = Queue(int(max_buffer_size * 1.3)) # type: Queue
self.max_buffer_size = max_buffer_size
def push_data_periodically():
while True:
if self.message_queue.qsize() > 0:
self.__submit_batch(list(self.message_queue.queue))
self.message_queue.queue.clear()
else:
time.sleep(interval / 1000.0)
Thread(target=push_data_periodically).start()
def __submit_batch(self, data):
"""
Submit messages
:type data: list
:param data: messages
:return:
"""
requests.post(self.target_url, data=json.dumps(data))
def emit(self, record):
"""
Commit record to server
:param record:
:return:
"""
record_object = self.formatter.format(record)
record_object["app_name"] = self.app_name
self.message_queue.put(record_object, timeout=1)
if self.message_queue.qsize() > self.max_buffer_size:
self.__submit_batch(list(self.message_queue.queue))
self.message_queue.queue.clear()
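# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module).
# It assumes a Lofka collector is listening at the placeholder URL below; the
# handler appends "lofka/service/push" to the base URL it is given unless a
# local lofka.json overrides it.
if __name__ == "__main__":
    import logging
    demo_logger = logging.getLogger("lofka_demo")
    demo_logger.setLevel(DEBUG)
    demo_logger.addHandler(LofkaHandler("http://127.0.0.1:9500/", app_name="demo_app"))
    demo_logger.info("hello from the Lofka handler")
    try:
        1 / 0
    except ZeroDivisionError:
        # exceptions are shipped with the structured 'throwable' stack trace built above
        demo_logger.exception("demo exception")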
|
thermo.py
|
import logging
from logging import handlers
from flask import Flask, render_template, request, g, flash, url_for, redirect, \
    jsonify, Response
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask_socketio import SocketIO, send, emit
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
import threading
from thermo_monitor import ThermoMonitor
import requests
from resources import settings
import math
import os
import time
from datetime import datetime, timedelta
import flask_excel as excel
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
#import numpy as np
import sys
import random
import io
import numpy as np
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import dateutil.parser
import smbus2
import bme280
level = logging.NOTSET
format = '%(asctime)-8s %(levelname)-8s %(message)s'
formatter = logging.Formatter(format,"%Y-%m-%d %H:%M:%S")
writer = logging.StreamHandler()
writer.setFormatter(formatter)
handlers = [writer,logging.handlers.TimedRotatingFileHandler('thermo',when="D",interval=1,backupCount=5,encoding=None,delay=False,utc=False,atTime=None)]
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
thread_stop_event = threading.Event()
socket_thread = threading.Thread()
#bus = smbus2.SMBus(port)
logging.basicConfig(level = level, format = format, handlers = handlers)
thermo_handle = ThermoMonitor(70)
def reading_logger():
logging.info('LOGGER: Taking measurements.')
port = 1
address = 0x76
try:
bus = smbus2.SMBus(port)
calibration_params = bme280.load_calibration_params(bus, address)
while True:
current_set = thermo_handle.get_set()
data = bme280.sample(bus, address, calibration_params)
temperature = ((data.temperature*1.8)+32) + settings['setpoints'][int(current_set)]['temp_offset']
measure_new = measure(datetime.now(),int(current_set),data.humidity,temperature,0)
add_this = thermo_handle.set_current_temp(measure_new)
logging.info("STATE: " + add_this)
measure_new.set_state(add_this)
db.session.add(measure_new)
db.session.commit()
logging.info("LOGGER Read: " + str(measure_new))
time.sleep(60)
except OSError:
note = requests.get('https://maker.ifttt.com/trigger/change/with/key/bwuymkNBi9Ga5iBN0-NXDD')
time.sleep(120)
os.system('reboot')
def main():
#needs boolean, don't start until reading logger has completed first value.
logging.info('MAIN: Starting Up')
#start_temp_up = threading.Thread(name="recording temp values",target=reading_logger,daemon=True)
#start_temp_up.start()
class MyFlaskApp(SocketIO):
def run(self, app, host=None, port=None, debug=None, load_dotenv=True, **options):
start_HVAC = threading.Thread(name="HVAC_unit",target=reading_logger, daemon=True)
start_HVAC.start()
super(MyFlaskApp, self).run(app=app,host=host, port=port, debug=True,use_reloader=False,**options)
app = Flask(__name__)
app.config.from_pyfile(os.path.abspath('pod_db.cfg'))
global db
db = SQLAlchemy(app)
migrate = Migrate(app,db)
excel.init_excel(app)
socketio = MyFlaskApp(app)
def temp_cond(fix_this):
    # Round up only when the fractional part is at least .8; otherwise round down.
x = round(fix_this - math.floor(fix_this),1)
if x >= .8:
return int(round(fix_this,0))
else:
return int(math.floor(fix_this))
def hum_cond(fix_this):
    # Round up only when the fractional part is at least .8; otherwise round down.
x = round(fix_this - math.floor(fix_this),1)
if x >= .8:
return int(round(fix_this,0))
else:
return int(math.floor(fix_this))
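# Worked example of the rounding rule used by temp_cond/hum_cond above:
# a fractional part of at least .8 rounds up, anything smaller rounds down,
# e.g. temp_cond(71.3) -> 71 and temp_cond(71.9) -> 72.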
class measure(db.Model):
__tablename__ = "measurements"
id = db.Column('measure_id', db.Integer, primary_key=True)
read_date = db.Column(db.DateTime)
#read_time = db.Column(db.DateTime)
curr_setpoint = db.Column(db.Integer)
curr_hum = db.Column(db.Float)
curr_temp = db.Column(db.Float)
adj_temp = db.Column(db.Float)
adj_hum = db.Column(db.Float)
HVAC_state = db.Column(db.String)
def set_state(self, the_state):
self.HVAC_state = the_state
def __str__(self):
return "Actual Temp %s, Adj Temp %s, Current Set %s" % ((self.curr_temp), self.adj_temp, self.curr_setpoint)
def __init__(self, read_date, setpoint, curr_hum,curr_temp,offset):
self.read_date = read_date
self.curr_setpoint = setpoint
self.curr_hum = round(curr_hum/100,2)
self.curr_temp = round(curr_temp,2)
self.adj_temp = temp_cond(self.curr_temp+offset)
self.HVAC_state = ""
        # TODO: the commented-out humidity adjustment below still needs to be fixed.
#self.adj_hum = round(self.curr_hum-((.43785694*self.curr_hum)-22.253659085944268),2)
self.adj_hum = self.curr_hum
#self.TC_temp = round(-8.96584011843079 + (self.curr_temp * 1.09058722) + ((self.adj_hum/100)*9.73214286),2)
#self.avg_temp = (self.adj_temp + self.TC_temp)/2
@app.route('/force_on')
def force_on():
thermo_handle.start_cooling('FORCE')
succ_response = {"status":"OK",'task':'forced on'}
return jsonify(succ_response)
@app.route('/setpoint/<change_point>')
def change_set(change_point):
thermo_handle.change_set(int(change_point))
succ_response = {"status":"OK","new set":change_point}
return jsonify(succ_response)
@app.route('/force_off')
def force_off():
thermo_handle.turn_off()
succ_response = {"status":"OK",'task':'forced off'}
return jsonify(succ_response)
@app.route('/plot.png')
def plot_png():
fig = create_figure()
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
def create_figure(xs_query=None, ys_query=None):
    # Accept pre-filtered query results (as passed in by /filter_test); when none
    # are given, fall back to plotting the full measurement history.
hours_tick = mdates.HourLocator()
minute_tick = mdates.MinuteLocator(byminute=30)
the_look = mdates.DateFormatter("%H:%M")
fig = Figure(figsize=(8,6))
axis = fig.add_subplot(1, 1, 1)
    xs = []
    ys = []
    if xs_query is None:
        xs_query = measure.query.with_entities(measure.curr_temp).order_by(measure.read_date.desc()).all()
for x in xs_query:
xs.append(x)
xs = np.asarray(xs)
    if ys_query is None:
        ys_query = measure.query.with_entities(measure.read_date).order_by(measure.read_date.desc()).all()
for y in ys_query:
ys.append(y)
ys = np.asarray(ys)
axis.plot(ys, xs)
axis.xaxis.set_major_locator(hours_tick)
axis.xaxis.set_major_formatter(the_look)
axis.xaxis.set_minor_locator(minute_tick)
fig.autofmt_xdate()
return fig
@app.route('/date_pick')
def render_date_chooser():
return render_template('date_pick.html')
@app.route('/filter_test', methods=['POST'])
def filter_date():
#user_submit and user end will BOTH need to have times.
user_submit = datetime.strptime(request.form['date_pick'],'%m/%d/%Y')
print(user_submit)
user_end = user_submit + timedelta(hours=23,minutes=59)
print(user_end)
temp_table_x = measure.query.with_entities(measure.curr_temp).filter(measure.read_date.between(user_submit,user_end)).order_by(measure.read_date.desc()).all()
temp_table_y = measure.query.with_entities(measure.read_date).filter(measure.read_date.between(user_submit,user_end)).order_by(measure.read_date.desc()).all()
fig = create_figure(temp_table_x,temp_table_y)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
#return render_template('temperature_table.html',measure_list=temperature_table)
@app.route('/')
def index():
temperature_table = measure.query.order_by(measure.read_date.desc()).limit(60)
return render_template('temperature_table.html',measure_list=temperature_table)
@app.route('/change_set/<new_set>')
def change_set_at_monitor(new_set):
thermo_handle.change_set(int(new_set))
succ_response = {"status":"OK",'new set':new_set}
return jsonify(succ_response)
@app.route('/export', methods=['GET'])
def xls_out():
now = datetime.now()
date_time = "dump_thermo-" + now.strftime("%m.%d.%Y-%H.%M")
return excel.make_response_from_a_table(session=db.session,status=200,table=measure,file_type="xlsx",file_name=date_time)
def start_over():
db.reflect()
db.drop_all()
@app.route("/test")
def does_it_work():
figure_look = measure.query.first()
print(figure_look)
succ_response = {"status":"OK"}
return jsonify(succ_response)
def temp_sender_thread():
"""
Generate a random number every 1 second and emit to a socketio instance (broadcast)
Ideally to be run in a separate thread?
"""
logging.info("Sending Temp Updates")
    while not thread_stop_event.is_set():
number = measure.query.order_by(measure.id.desc()).first()
logging.info(number)
socketio.emit('newtemp', {'temp': number.curr_temp,"hum":number.curr_hum,"set":number.curr_setpoint,"state":number.HVAC_state}, namespace='/thermostat')
socketio.sleep(30)
@socketio.on('connect', namespace='/thermostat')
def temperature_connect():
# need visibility of the global thread object
global socket_thread
print('Client connected')
thread_stop_event.clear()
    # Start the temperature broadcaster thread only if it has not been started before.
    if not socket_thread.is_alive():
print("Starting Thread")
socket_thread = socketio.start_background_task(temp_sender_thread)
@socketio.on('disconnect', namespace='/thermostat')
def temperature_disconnect():
print('Client disconnected')
    if socket_thread.is_alive():
global thread_stop_event
thread_stop_event.set()
        print('Disconnected & thread stopped')
if __name__ == "__main__":
#start_over()
db.create_all()
bootstrap = Bootstrap(app)
socketio.run(app,host='0.0.0.0',port=1949)
|
app.py
|
import time
from pathlib import Path
from regionSelector import RegionSelector
from CrawlData import CrawlData
from multiprocessing import Process
import sys, os, traceback, types
import cv2
import imutils
import numpy as np
import sqlite3
# from detect.bikeDetect import BikeDetect
from detect.carDetect import CarDetect
def nothing(x):
pass
class App():
def __init__(self,settings):
self.settings = settings
self.carDetect = CarDetect()
# self.bikeDetect = BikeDetect()
self.camera = cv2.VideoCapture(settings['source'])
self.winName = settings['windowName']
self.crawlData = CrawlData(settings)
if( not os.path.isdir(settings["storage"])):
os.mkdir(settings["storage"])
if( not os.path.isdir(settings["storage"]+"\\images")):
os.mkdir(settings["storage"]+"\\images")
self.__regionSelector = RegionSelector(self.camera.read()[1],self.winName)
cv2.namedWindow(self.winName, cv2.WINDOW_NORMAL)
def run(self):
processes = []
self.__regionSelector.select_region()
fps = 0
#run
PressedKey = None
while PressedKey != 27:
if(PressedKey == 22):
print("Starting data viewer...")
print(str(os.startfile(os.getcwd()+'/DataViewer/Data Viewer/bin/Release/Data Viewer.exe')))
            start_time = time.time()
            hasFrame, frame = self.camera.read()
            # Stop if we reached the end of the video, before touching the frame.
            if not hasFrame:
                print("Done processing !!!")
                cv2.waitKey(3000)
                break
            original_frame = frame.copy()
            roi = self.__regionSelector.getROI(frame)
            # carCords = self.carDetect.getCords(roi)
# frame = self.carDetect.drawCords(roi,carCords)
plateCords = self.carDetect.detectNumberPlate(roi,frame)
plts = self.carDetect.plateOCR(frame,plateCords)
# self.database.insertPlates(plts)
if len(plts) > 0:
plts[0][1] = frame
proc = Process(target=self.crawlData.fetch,args=(plts[0],))
processes.append(proc)
proc.start()
self.carDetect.drawCords(frame,plateCords)
frame = cv2.putText(roi,str(str(fps)+" fps"),(10,30),cv2.FONT_ITALIC,0.5,(255,255,0),1)
            cv2.imshow('GodsEye', self.__regionSelector.undoROI(original_frame, roi))
fps = int(1.0 / (time.time() - start_time))
PressedKey = cv2.waitKey(1)
n = len(processes)
for proc in processes:
frame = cv2.putText(np.zeros((50,500,3))," Please wait, Let us finish all crawling processes("+str(n)+")",(10,30),cv2.FONT_ITALIC,0.5,(255,255,255),1)
cv2.imshow('GodsEye',frame)
cv2.waitKey(1)
n-=1
proc.join()
frame = cv2.putText(np.zeros((50,500,3)),"Thank You, Good Bye",(10,30),cv2.FONT_ITALIC,0.5,(255,255,255),1)
cv2.imshow('GodsEye',frame)
cv2.waitKey(3000)
cv2.destroyAllWindows()
self.camera.release()
def __del__(self):
pass
|
viewport_engine_2.py
|
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import time
import threading
import pyrpr
from .viewport_engine import ViewportEngine, ViewportSettings, FinishRenderException
from .context import RPRContext2
from rprblender.utils import logging
log = logging.Log(tag='viewport_engine_2')
class ViewportEngine2(ViewportEngine):
_RPRContext = RPRContext2
def __init__(self, rpr_engine):
super().__init__(rpr_engine)
self.is_last_iteration = False
self.rendered_image = None
self.resolve_event = threading.Event()
self.resolve_thread = None
self.resolve_lock = threading.Lock()
def stop_render(self):
self.is_finished = True
self.restart_render_event.set()
self.resolve_event.set()
self.sync_render_thread.join()
self.resolve_thread.join()
self.rpr_context.set_render_update_callback(None)
self.rpr_context = None
self.image_filter = None
self.upscale_filter = None
def _resolve(self):
self.rpr_context.resolve(None if self.image_filter and self.is_last_iteration else
(pyrpr.AOV_COLOR,))
def _resize(self, width, height):
if self.width == width and self.height == height:
self.is_resized = False
return
with self.render_lock:
with self.resolve_lock:
self.rpr_context.resize(width, height)
self.width = width
self.height = height
if self.image_filter:
image_filter_settings = self.image_filter.settings.copy()
image_filter_settings['resolution'] = self.width, self.height
self.setup_image_filter(image_filter_settings)
if self.background_filter:
background_filter_settings = self.background_filter.settings.copy()
background_filter_settings['resolution'] = self.width, self.height
self.setup_background_filter(background_filter_settings)
if self.upscale_filter:
upscale_filter_settings = self.upscale_filter.settings.copy()
upscale_filter_settings['resolution'] = self.width, self.height
self.setup_upscale_filter(upscale_filter_settings)
self.is_resized = True
def _do_render(self):
iteration = 0
time_begin = 0.0
update_iterations = 1
is_set_callback = False
def render_update(progress):
if self.restart_render_event.is_set():
self.rpr_context.abort_render()
return
# don't need to do intermediate update when progress == 1.0
if progress == 1.0:
return
self.resolve_event.set()
time_render = time.perf_counter() - time_begin
self.notify_status(f"Time: {time_render:.1f} sec | Iteration "
f"{iteration + update_iterations}/{self.render_iterations}" +
"." * int(progress / 0.2), "Render")
self.notify_status("Starting...", "Render")
        # Infinite cycle, which starts when the scene has to be re-rendered.
        # It waits for restart_render_event to be set.
        # Exit from this cycle is implemented by raising FinishRenderException
        # when self.is_finished is set from the main thread.
while True:
self.restart_render_event.wait()
if self.is_finished:
raise FinishRenderException
# preparations to start rendering
iteration = 0
time_begin = 0.0
time_render = 0.0
self.is_last_iteration = False
# this cycle renders each iteration
while True:
if self.is_finished:
raise FinishRenderException
if self.restart_render_event.is_set():
# clears restart_render_event, prepares to start rendering
self.restart_render_event.clear()
vs = self.viewport_settings
if vs is None:
continue
if self.user_settings.adapt_viewport_resolution:
self._adapt_resize(*self._get_resolution(vs),
self.user_settings.min_viewport_resolution_scale * 0.01)
else:
self._resize(*self._get_resolution(vs))
self.is_resolution_adapted = not self.user_settings.adapt_viewport_resolution
vs.export_camera(self.rpr_context.scene.camera)
iteration = 0
self.rpr_context.sync_auto_adapt_subdivision()
self.rpr_context.sync_portal_lights()
time_begin = time.perf_counter()
log(f"Restart render [{vs.width}, {vs.height}]")
if self.restart_render_event.is_set():
continue
self.rpr_context.set_parameter(pyrpr.CONTEXT_FRAMECOUNT, iteration)
update_iterations = 1
if iteration > 1:
update_iterations = min(32, self.render_iterations - iteration)
self.rpr_context.set_parameter(pyrpr.CONTEXT_ITERATIONS, update_iterations)
                # unset the render update callback for the first iteration and set it
                # back starting from the second iteration
if iteration == 0:
self.rpr_context.set_render_update_callback(None)
self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, 3)
is_set_callback = False
elif iteration == 1:
if self.is_resolution_adapted:
self.rpr_context.clear_frame_buffers()
self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, 0)
elif not is_set_callback:
self.rpr_context.set_render_update_callback(render_update)
is_set_callback = True
# rendering
with self.render_lock:
try:
self.rpr_context.render(restart=(iteration == 0))
except pyrpr.CoreError as e:
if e.status != pyrpr.ERROR_ABORTED: # ignoring ERROR_ABORTED
raise
if iteration > 0 and self.restart_render_event.is_set():
continue
if iteration == 1 and not self.is_resolution_adapted:
time_render_prev = time_render
time_render = time.perf_counter() - time_begin
iteration_time = time_render - time_render_prev
target_time = 1.0 / self.user_settings.viewport_samples_per_sec
self.requested_adapt_ratio = target_time / iteration_time
self._adapt_resize(*self._get_resolution(self.viewport_settings),
self.user_settings.min_viewport_resolution_scale * 0.01,
self.requested_adapt_ratio)
iteration = 0
self.is_resolution_adapted = True
continue
iteration += update_iterations
self.is_last_iteration = iteration >= self.render_iterations
if self.is_last_iteration:
break
                # get render results directly for the first iteration; later iterations
                # are resolved asynchronously by the resolve thread
if iteration == 1:
with self.resolve_lock:
self._resolve()
self.rendered_image = self.rpr_context.get_image()
else:
self.resolve_event.set()
time_render = time.perf_counter() - time_begin
self.notify_status(f"Time: {time_render:.1f} sec | Iteration {iteration}/"
f"{self.render_iterations}", "Render")
if not self.is_last_iteration:
continue
# notifying viewport that rendering is finished
with self.resolve_lock:
self._resolve()
time_render = time.perf_counter() - time_begin
with self.render_lock:
if self.image_filter:
self.notify_status(f"Time: {time_render:.1f} sec | Iteration: {iteration}"
f" | Denoising...", "Render")
# applying denoising
self.update_image_filter_inputs()
self.image_filter.run()
image = self.image_filter.get_data()
time_render = time.perf_counter() - time_begin
status_str = f"Time: {time_render:.1f} sec | Iteration: {iteration} | Denoised"
else:
image = self.rpr_context.get_image()
status_str = f"Time: {time_render:.1f} sec | Iteration: {iteration}"
if self.background_filter:
with self.resolve_lock:
                        self.rendered_image = self.resolve_background_aovs(image)
else:
self.rendered_image = image
if self.upscale_filter:
self.upscale_filter.update_input('color', self.rendered_image)
self.upscale_filter.run()
self.rendered_image = self.upscale_filter.get_data()
status_str += " | Upscaled"
self.notify_status(status_str, "Rendering Done")
def _do_resolve(self):
while True:
self.resolve_event.wait()
self.resolve_event.clear()
if self.is_finished:
break
if self.restart_render_event.is_set():
continue
if self.is_last_iteration:
continue
with self.resolve_lock:
self._resolve()
image = self.rpr_context.get_image()
if self.background_filter:
image = self.resolve_background_aovs(image)
self.rendered_image = image
else:
self.rendered_image = image
log("Finish _do_resolve")
def resolve_background_aovs(self, color_image):
settings = self.background_filter.settings
self.rpr_context.resolve((pyrpr.AOV_OPACITY,))
alpha = self.rpr_context.get_image(pyrpr.AOV_OPACITY)
if settings['use_shadow']:
self.rpr_context.resolve((pyrpr.AOV_SHADOW_CATCHER,))
if settings['use_reflection']:
self.rpr_context.resolve((pyrpr.AOV_REFLECTION_CATCHER,))
if settings['use_shadow'] or settings['use_reflection']:
self.rpr_context.resolve((pyrpr.AOV_BACKGROUND,))
self.update_background_filter_inputs(color_image=color_image, opacity_image=alpha)
self.background_filter.run()
return self.background_filter.get_data()
def draw(self, context):
log("Draw")
if not self.is_synced or self.is_finished:
return
# initializing self.viewport_settings and requesting first self.restart_render_event
if not self.viewport_settings:
self.viewport_settings = ViewportSettings(context)
self._resize(*self._get_resolution())
self.restart_render_event.set()
return
# checking for viewport updates: setting camera position and resizing
viewport_settings = ViewportSettings(context)
if viewport_settings.width * viewport_settings.height == 0:
return
if self.viewport_settings != viewport_settings:
self.viewport_settings = viewport_settings
self.restart_render_event.set()
im = self.rendered_image
if im is None:
return
self.gl_texture.set_image(im)
self.draw_texture(self.gl_texture.texture_id, context.scene)
def sync(self, context, depsgraph):
super().sync(context, depsgraph)
self.resolve_thread = threading.Thread(target=self._do_resolve)
self.resolve_thread.start()
def _sync_update_after(self):
self.rpr_engine.update_stats("Render", "Syncing...")
|
helpers.py
|
import colorsys # for get_N_HexCol
import os
import json
import csv
import sys
import subprocess
from multiprocessing import Process, Queue
import subprocess # copy text to clipboard
import time
def pd(what, start_time):
time_in_min = (time.time() - start_time) / 60.0
time_in_h = time_in_min / 60.0
print("%s took ~%0.2f min, ~%0.2f h"%(what, time_in_min, time_in_h))
def copy2clip(txt):
cmd="echo '"+txt.strip()+"'| xsel --clipboard"
return subprocess.check_call(cmd, shell=True)
# https://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-outputhttps://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output
def execute_command(command_str):
command_pieces = command_str.split(" ")
return subprocess.check_output(command_pieces)
# saves a file to the experiment directory
def save_to_json(data, outfile_name):
with open(outfile_name, "w") as f:
f.write( json.dumps( data ) )
print("Saved to json: %s."%outfile_name)
# dump data array to file
def save_to_csv(data_rows, outfile_name):
with open(outfile_name, "w") as f:
cw = csv.writer(f,delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
for data_row in data_rows:
cw.writerow(data_row)
print("Saved to csv: %s."%outfile_name)
# load data from vocabulary
def load_from_json(infile_name):
with open(infile_name, "r") as f:
print("Loaded from json: %s"%infile_name)
json_str = f.read()
return json.loads(json_str)
# load data from csv
def load_from_csv(infile_name):
with open(infile_name, "r") as f:
read_rows = csv.reader(f,delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
rows = []
for row in read_rows:
rows.append(row)
print("Loaded from csv: %s."%infile_name)
return rows
def create_if_not_exists(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def num_lines(file_name):
return int(execute_command("wc -l %s"%file_name).split()[0])
# get a hex color range for number of parts
def get_N_HexCol(N=5):
    HSV_tuples = [(x * 1.0 / N, 1, 1) for x in range(N)]
    hex_out = []
    for hsv in HSV_tuples:
        rgb = [int(c * 255) for c in colorsys.hsv_to_rgb(*hsv)]
        hex_out.append("#" + "".join("%02X" % c for c in rgb))
    return hex_out
"""
def process_one_line(line_q, result_q, ):
# get item blocking
new_line = line_q.get(timeout=5)
# do something
result = experiment_lib.extract_pattern_id(new_line)
# get back result
result_q.put(result)
"""
def multiprocess_file(file_name, process_one_line, num_processes=8, max_size=10000):
# define multithreading
line_q = Queue(maxsize=max_size)
result_q = Queue()
# process for scooping lines to process on input q
def load_line(line_q, file_name):
for l in open(file_name, 'r'):
line_q.put(l)
# wrapper for processing the line in one loop
    def process_one_line_loop(line_q, result_q, pid):
try:
while True:
process_one_line(line_q,result_q)
except Exception as e:
print(e)
print("Shutting down processing thread %i"%pid)
# define processes
processes = []
    for pid in range(num_processes):
        processes.append(Process(target=process_one_line_loop, args=(line_q, result_q, pid)))
line_load_p = Process(target=load_line, args=(line_q,file_name))
# start threads
[p.start() for p in processes]
line_load_p.start()
return result_q, line_q, processes, line_load_p
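"""
Usage sketch for multiprocess_file (illustrative only; the file name and the
worker below are made up):
def count_chars(line_q, result_q):
    line = line_q.get(timeout=5) # raises queue.Empty once the file is drained
    result_q.put(len(line))
result_q, line_q, procs, loader = multiprocess_file("some_big_file.txt", count_chars)
loader.join() # wait until every line has been queued
[p.join() for p in procs] # workers exit after the 5 s get() timeout
lengths = [result_q.get() for _ in range(result_q.qsize())]
"""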
# Print iterations progress
def print_progress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):
filledLength = int(round(barLength * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar = '#' * filledLength + '-' * (barLength - filledLength)
    sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
print("\n")
|
Weld_Path_Embedded.py
|
# This example shows how to create a small UI window embedded in RoboDK
# Type help("robolink") or help("robodk") for more information
# Press F5 to run the script
# Documentation: https://robodk.com/doc/en/RoboDK-API.html
# Reference: https://robodk.com/doc/en/PythonAPI/index.html
# Note: It is not required to keep a copy of this file, your python script is saved with the station
from robolink import * # RoboDK API
from robodk import * # Robot toolbox
from tkinter import *
NUM_SIDES = 7
LENGTH = 200
RDK = Robolink()
robot = RDK.Item('', ITEM_TYPE_ROBOT)
# get the home target and the welding targets:
home = RDK.Item('Home')
if not home.Valid():
raise Exception("Home target not defined")
target = RDK.Item('Target Reference')
if not target.Valid():
raise Exception("Target Reference not available")
def RunProgram():
global NUM_SIDES
global LENGTH
target_pose = target.Pose()
target_xyz = target_pose.Pos()
# Move the robot to the reference point:
robot.MoveJ(home)
robot.MoveJ(target)
for i in range((NUM_SIDES) + 1):
angle = i*2*pi/NUM_SIDES
posei = target_pose * rotz(angle) * transl(LENGTH,0,0) * rotz(-angle)
robot.MoveL(posei)
robot.MoveL(target)
robot.MoveJ(home)
#RDK.Finish()
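# Each vertex pose above is composed as:
#   target_pose * rotz(angle) * transl(LENGTH, 0, 0) * rotz(-angle)
# i.e. rotate about the reference Z axis, step LENGTH mm outwards, then undo
# the rotation so the tool keeps the reference orientation at every vertex.
# Using NUM_SIDES + 1 points brings the path back to the first vertex and
# closes the polygon.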
# Create option window
root = Tk()
num_sides = StringVar()
num_sides.set(str(NUM_SIDES))
length = StringVar()
length.set(str(LENGTH))
Label(root,text = "Enter the number of sides for polygon").pack()
Entry(root,textvariable = num_sides).pack()
Label(root,text = "Enter the radius").pack()
Entry(root,textvariable = length).pack()
import threading
rdk_lock = threading.Lock()
def ExecuteChoice():
def thread_ExecuteChoice():
"""We need to run on a separate thread to make sure we don't block the main loop, otherwise, RoboDK will freeze"""
global NUM_SIDES
global LENGTH
NUM_SIDES = int(num_sides.get())
LENGTH = float(length.get())
# We need to make sure we don't run the program twice at the same time
if rdk_lock.locked():
print("Operation ignored. Waiting to complete previous movement...")
return
rdk_lock.acquire()
RunProgram()
rdk_lock.release()
threading.Thread(target=thread_ExecuteChoice).start()
Button(root,text = "Start",command = ExecuteChoice).pack()
# Embed the window in RoboDK
window_title = "Weld path"
root.title(window_title)
EmbedWindow(window_title)
root.mainloop()
|
test_pooling_base.py
|
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes to test built-in connection-pooling with threads or greenlets.
"""
import gc
import random
import socket
import sys
import thread
import threading
import time
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
import pymongo.pool
from pymongo.connection import Connection
from pymongo.pool import Pool, NO_REQUEST, NO_SOCKET_YET, SocketInfo
from pymongo.errors import ConfigurationError
from test import version
from test.test_connection import get_connection, host, port
from test.utils import delay, is_mongos, one
N = 50
DB = "pymongo-pooling-tests"
if sys.version_info[0] >= 3:
from imp import reload
try:
import gevent
from gevent import Greenlet, monkey, hub
import gevent.coros, gevent.event
has_gevent = True
except ImportError:
has_gevent = False
class MongoThread(object):
"""A thread, or a greenlet, that uses a Connection"""
def __init__(self, test_case):
self.use_greenlets = test_case.use_greenlets
self.connection = test_case.c
self.db = self.connection[DB]
self.ut = test_case
self.passed = False
def start(self):
if self.use_greenlets:
# A Gevent extended Greenlet
self.thread = Greenlet(self.run)
else:
self.thread = threading.Thread(target=self.run)
self.thread.setDaemon(True) # Don't hang whole test if thread hangs
self.thread.start()
def join(self):
self.thread.join(300)
if self.use_greenlets:
assert self.thread.dead, "Greenlet timeout"
else:
assert not self.thread.isAlive(), "Thread timeout"
self.thread = None
def run(self):
self.run_mongo_thread()
# No exceptions thrown
self.passed = True
def run_mongo_thread(self):
raise NotImplementedError()
class SaveAndFind(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
rand = random.randint(0, N)
_id = self.db.sf.save({"x": rand}, safe=True)
self.ut.assertEqual(rand, self.db.sf.find_one(_id)["x"])
class Unique(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.connection.start_request()
self.db.unique.insert({})
self.ut.assertEqual(None, self.db.error())
self.connection.end_request()
class NonUnique(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.connection.start_request()
self.db.unique.insert({"_id": "jesse"})
self.ut.assertNotEqual(None, self.db.error())
self.connection.end_request()
class Disconnect(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.connection.disconnect()
class NoRequest(MongoThread):
def run_mongo_thread(self):
self.connection.start_request()
errors = 0
for _ in xrange(N):
self.db.unique.insert({"_id": "jesse"})
if not self.db.error():
errors += 1
self.connection.end_request()
self.ut.assertEqual(0, errors)
def run_cases(ut, cases):
threads = []
nruns = 10
if (
ut.use_greenlets and sys.platform == 'darwin'
and gevent.version_info[0] < 1
):
# Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
# about 35 Greenlets share a Connection. Apparently fixed in
# recent Gevent development.
nruns = 5
for case in cases:
for i in range(nruns):
t = case(ut)
t.start()
threads.append(t)
for t in threads:
t.join()
for t in threads:
assert t.passed, "%s.run_mongo_thread() threw an exception" % repr(t)
class OneOp(MongoThread):
def __init__(self, ut):
super(OneOp, self).__init__(ut)
def run_mongo_thread(self):
pool = self.connection._MongoClient__pool
assert len(pool.sockets) == 1, "Expected 1 socket, found %d" % (
len(pool.sockets)
)
sock_info = one(pool.sockets)
self.connection.start_request()
# start_request() hasn't yet moved the socket from the general pool into
# the request
assert len(pool.sockets) == 1
assert one(pool.sockets) == sock_info
self.connection[DB].test.find_one()
# find_one() causes the socket to be used in the request, so now it's
# bound to this thread
assert len(pool.sockets) == 0
assert pool._get_request_state() == sock_info
self.connection.end_request()
# The socket is back in the pool
assert len(pool.sockets) == 1
assert one(pool.sockets) == sock_info
class CreateAndReleaseSocket(MongoThread):
class Rendezvous(object):
def __init__(self, nthreads, use_greenlets):
self.nthreads = nthreads
self.nthreads_run = 0
if use_greenlets:
self.lock = gevent.coros.RLock()
self.ready = gevent.event.Event()
else:
self.lock = threading.Lock()
self.ready = threading.Event()
    def __init__(self, ut, connection, start_request, end_request, rendezvous):
        super(CreateAndReleaseSocket, self).__init__(ut)
        self.connection = connection
        self.start_request = start_request
        self.end_request = end_request
        self.rendezvous = rendezvous
def run_mongo_thread(self):
# Do an operation that requires a socket.
# test_max_pool_size uses this to spin up lots of threads requiring
# lots of simultaneous connections, to ensure that Pool obeys its
# max_size configuration and closes extra sockets as they're returned.
for i in range(self.start_request):
self.connection.start_request()
# Use a socket
self.connection[DB].test.find_one()
# Don't finish until all threads reach this point
        r = self.rendezvous
r.lock.acquire()
r.nthreads_run += 1
if r.nthreads_run == r.nthreads:
# Everyone's here, let them finish
r.ready.set()
r.lock.release()
else:
r.lock.release()
r.ready.wait(2) # Wait two seconds
assert r.ready.isSet(), "Rendezvous timed out"
for i in range(self.end_request):
self.connection.end_request()
class _TestPoolingBase(object):
"""Base class for all connection-pool tests. Doesn't inherit from
unittest.TestCase, and its name is prefixed with "_" to avoid being
run by nose. Real tests double-inherit from this base and from TestCase.
"""
use_greenlets = False
def setUp(self):
if self.use_greenlets:
if not has_gevent:
raise SkipTest("Gevent not installed")
# Note we don't do patch_thread() or patch_all() - we're
# testing here that patch_thread() is unnecessary for
# the connection pool to work properly.
monkey.patch_socket()
self.c = self.get_connection(auto_start_request=False)
# reset the db
db = self.c[DB]
db.unique.drop()
db.test.drop()
db.unique.insert({"_id": "jesse"}, safe=True)
db.test.insert([{} for i in range(10)], safe=True)
def tearDown(self):
self.c.close()
if self.use_greenlets:
# Undo patch
reload(socket)
def get_connection(self, *args, **kwargs):
opts = kwargs.copy()
opts['use_greenlets'] = self.use_greenlets
return get_connection(*args, **opts)
def get_pool(self, *args, **kwargs):
kwargs['use_greenlets'] = self.use_greenlets
return Pool(*args, **kwargs)
def assert_no_request(self):
self.assertEqual(
NO_REQUEST, self.c._MongoClient__pool._get_request_state()
)
def assert_request_without_socket(self):
self.assertEqual(
NO_SOCKET_YET, self.c._MongoClient__pool._get_request_state()
)
def assert_request_with_socket(self):
self.assertTrue(isinstance(
self.c._MongoClient__pool._get_request_state(), SocketInfo
))
def assert_pool_size(self, pool_size):
self.assertEqual(
pool_size, len(self.c._MongoClient__pool.sockets)
)
class _TestPooling(_TestPoolingBase):
"""Basic pool tests, to be run both with threads and with greenlets."""
def test_max_pool_size_validation(self):
self.assertRaises(
ConfigurationError, Connection, host=host, port=port,
max_pool_size=-1
)
self.assertRaises(
ConfigurationError, Connection, host=host, port=port,
max_pool_size='foo'
)
c = Connection(host=host, port=port, max_pool_size=100)
self.assertEqual(c.max_pool_size, 100)
def test_no_disconnect(self):
run_cases(self, [NoRequest, NonUnique, Unique, SaveAndFind])
def test_simple_disconnect(self):
# Connection just created, expect 1 free socket
self.assert_pool_size(1)
self.assert_no_request()
self.c.start_request()
self.assert_request_without_socket()
cursor = self.c[DB].stuff.find()
# Cursor hasn't actually caused a request yet, so there's still 1 free
# socket.
self.assert_pool_size(1)
self.assert_request_without_socket()
# Actually make a request to server, triggering a socket to be
# allocated to the request
list(cursor)
self.assert_pool_size(0)
self.assert_request_with_socket()
# Pool returns to its original state
self.c.end_request()
self.assert_no_request()
self.assert_pool_size(1)
self.c.disconnect()
self.assert_pool_size(0)
self.assert_no_request()
def test_disconnect(self):
run_cases(self, [SaveAndFind, Disconnect, Unique])
def test_independent_pools(self):
# Test for regression of very early PyMongo bug: separate pools shared
# state.
p = self.get_pool((host, port), 10, None, None, False)
self.c.start_request()
self.c.pymongo_test.test.find_one()
self.assertEqual(set(), p.sockets)
self.c.end_request()
self.assert_pool_size(1)
self.assertEqual(set(), p.sockets)
def test_dependent_pools(self):
self.assert_pool_size(1)
self.c.start_request()
self.assert_request_without_socket()
self.c.test.test.find_one()
self.assert_request_with_socket()
self.assert_pool_size(0)
self.c.end_request()
self.assert_pool_size(1)
t = OneOp(self)
t.start()
t.join()
self.assertTrue(t.passed, "OneOp.run() threw exception")
self.assert_pool_size(1)
self.c.test.test.find_one()
self.assert_pool_size(1)
def test_multiple_connections(self):
a = self.get_connection(auto_start_request=False)
b = self.get_connection(auto_start_request=False)
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(1, len(b._MongoClient__pool.sockets))
a.start_request()
a.test.test.find_one()
self.assertEqual(0, len(a._MongoClient__pool.sockets))
a.end_request()
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(1, len(b._MongoClient__pool.sockets))
a_sock = one(a._MongoClient__pool.sockets)
b.end_request()
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(1, len(b._MongoClient__pool.sockets))
b.start_request()
b.test.test.find_one()
self.assertEqual(1, len(a._MongoClient__pool.sockets))
self.assertEqual(0, len(b._MongoClient__pool.sockets))
b.end_request()
b_sock = one(b._MongoClient__pool.sockets)
b.test.test.find_one()
a.test.test.find_one()
self.assertEqual(b_sock,
b._MongoClient__pool.get_socket((b.host, b.port)))
self.assertEqual(a_sock,
a._MongoClient__pool.get_socket((a.host, a.port)))
a_sock.close()
b_sock.close()
def test_request(self):
# Check that Pool gives two different sockets in two calls to
# get_socket() -- doesn't automatically put us in a request any more
cx_pool = self.get_pool(
pair=(host,port),
max_size=10,
net_timeout=1000,
conn_timeout=1000,
use_ssl=False
)
sock0 = cx_pool.get_socket()
sock1 = cx_pool.get_socket()
self.assertNotEqual(sock0, sock1)
# Now in a request, we'll get the same socket both times
cx_pool.start_request()
sock2 = cx_pool.get_socket()
sock3 = cx_pool.get_socket()
self.assertEqual(sock2, sock3)
# Pool didn't keep reference to sock0 or sock1; sock2 and 3 are new
self.assertNotEqual(sock0, sock2)
self.assertNotEqual(sock1, sock2)
# Return the request sock to pool
cx_pool.end_request()
sock4 = cx_pool.get_socket()
sock5 = cx_pool.get_socket()
# Not in a request any more, we get different sockets
self.assertNotEqual(sock4, sock5)
# end_request() returned sock2 to pool
self.assertEqual(sock4, sock2)
for s in [sock0, sock1, sock2, sock3, sock4, sock5]:
s.close()
def test_reset_and_request(self):
# reset() is called after a fork, or after a socket error. Ensure that
# a new request is begun if a request was in progress when the reset()
# occurred, otherwise no request is begun.
p = self.get_pool((host, port), 10, None, None, False)
self.assertFalse(p.in_request())
p.start_request()
self.assertTrue(p.in_request())
p.reset()
self.assertTrue(p.in_request())
p.end_request()
self.assertFalse(p.in_request())
p.reset()
self.assertFalse(p.in_request())
def test_pool_reuses_open_socket(self):
# Test Pool's _check_closed() method doesn't close a healthy socket
cx_pool = self.get_pool((host,port), 10, None, None, False)
sock_info = cx_pool.get_socket()
cx_pool.maybe_return_socket(sock_info)
# trigger _check_closed, which only runs on sockets that haven't been
# used in a second
time.sleep(1.1)
new_sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, new_sock_info)
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_socket(self):
# Test that Pool removes dead socket and the socket doesn't return
# itself PYTHON-344
cx_pool = self.get_pool((host,port), 10, None, None, False)
sock_info = cx_pool.get_socket()
# Simulate a closed socket without telling the SocketInfo it's closed
sock_info.sock.close()
self.assertTrue(pymongo.pool._closed(sock_info.sock))
cx_pool.maybe_return_socket(sock_info)
time.sleep(1.1) # trigger _check_closed
new_sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertNotEqual(sock_info, new_sock_info)
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_request_socket_after_1_sec(self):
# Test that Pool keeps request going even if a socket dies in request
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertEqual(sock_info, cx_pool._get_request_state())
sock_info.sock.close()
cx_pool.maybe_return_socket(sock_info)
time.sleep(1.1) # trigger _check_closed
# Although the request socket died, we're still in a request with a
# new socket
new_sock_info = cx_pool.get_socket()
self.assertTrue(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
self.assertEqual(0, len(cx_pool.sockets))
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_request_socket(self):
# Test that Pool keeps request going even if a socket dies in request
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertEqual(sock_info, cx_pool._get_request_state())
# Unlike in test_pool_removes_dead_request_socket_after_1_sec, we
# set sock_info.closed and *don't* wait 1 second
sock_info.close()
cx_pool.maybe_return_socket(sock_info)
# Although the request socket died, we're still in a request with a
# new socket
new_sock_info = cx_pool.get_socket()
self.assertTrue(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
self.assertEqual(0, len(cx_pool.sockets))
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_socket_after_request(self):
# Test that Pool handles a socket dying that *used* to be the request
# socket.
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, cx_pool._get_request_state())
# End request
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
# Kill old request socket
sock_info.sock.close()
cx_pool.maybe_return_socket(sock_info)
time.sleep(1.1) # trigger _check_closed
# Dead socket detected and removed
new_sock_info = cx_pool.get_socket()
self.assertFalse(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(0, len(cx_pool.sockets))
self.assertFalse(pymongo.pool._closed(new_sock_info.sock))
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_socket_reclamation(self):
if sys.platform.startswith('java'):
raise SkipTest("Jython can't do socket reclamation")
# Check that if a thread starts a request and dies without ending
# the request, that the socket is reclaimed into the pool.
cx_pool = self.get_pool(
pair=(host,port),
max_size=10,
net_timeout=1000,
conn_timeout=1000,
use_ssl=False,
)
self.assertEqual(0, len(cx_pool.sockets))
lock = None
the_sock = [None]
def leak_request():
self.assertEqual(NO_REQUEST, cx_pool._get_request_state())
cx_pool.start_request()
self.assertEqual(NO_SOCKET_YET, cx_pool._get_request_state())
sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, cx_pool._get_request_state())
the_sock[0] = id(sock_info.sock)
cx_pool.maybe_return_socket(sock_info)
if not self.use_greenlets:
lock.release()
if self.use_greenlets:
g = Greenlet(leak_request)
g.start()
g.join(1)
self.assertTrue(g.ready(), "Greenlet is hung")
else:
lock = thread.allocate_lock()
lock.acquire()
# Start a thread WITHOUT a threading.Thread - important to test that
# Pool can deal with primitive threads.
thread.start_new_thread(leak_request, ())
# Join thread
acquired = lock.acquire()
self.assertTrue(acquired, "Thread is hung")
# Make sure thread is really gone
time.sleep(1)
if 'PyPy' in sys.version:
gc.collect()
# Access the thread local from the main thread to trigger the
# ThreadVigil's delete callback, returning the request socket to
# the pool.
# In Python 2.6 and lesser, a dead thread's locals are deleted
# and those locals' weakref callbacks are fired only when another
# thread accesses the locals and finds the thread state is stale.
# This is more or less a bug in Python <= 2.6. Accessing the thread
# local from the main thread is a necessary part of this test, and
# realistic: in a multithreaded web server a new thread will access
# Pool._ident._local soon after an old thread has died.
cx_pool._ident.get()
# Pool reclaimed the socket
self.assertEqual(1, len(cx_pool.sockets))
self.assertEqual(the_sock[0], id(one(cx_pool.sockets).sock))
class _TestMaxPoolSize(_TestPoolingBase):
"""Test that connection pool keeps proper number of idle sockets open,
no matter how start/end_request are called. To be run both with threads and
with greenlets.
"""
def _test_max_pool_size(self, start_request, end_request):
c = self.get_connection(max_pool_size=4, auto_start_request=False)
# If you increase nthreads over about 35, note a
# Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
# about 35 Greenlets share a Connection. Apparently fixed in
# recent Gevent development.
nthreads = 10
        rendezvous = CreateAndReleaseSocket.Rendezvous(
nthreads, self.use_greenlets)
threads = []
for i in range(nthreads):
t = CreateAndReleaseSocket(
                self, c, start_request, end_request, rendezvous)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
self.assertTrue(t.passed)
# Socket-reclamation doesn't work in Jython
if not sys.platform.startswith('java'):
cx_pool = c._MongoClient__pool
# Socket-reclamation depends on timely garbage-collection
if 'PyPy' in sys.version:
gc.collect()
if self.use_greenlets:
# Wait for Greenlet.link() callbacks to execute
the_hub = hub.get_hub()
if hasattr(the_hub, 'join'):
# Gevent 1.0
the_hub.join()
else:
# Gevent 0.13 and less
the_hub.shutdown()
if start_request:
self.assertEqual(4, len(cx_pool.sockets))
else:
# Without calling start_request(), threads can safely share
# sockets; the number running concurrently, and hence the number
                # of sockets needed, is between 1 and 10, depending on
                # thread scheduling.
self.assertTrue(len(cx_pool.sockets) >= 1)
def test_max_pool_size(self):
self._test_max_pool_size(0, 0)
def test_max_pool_size_with_request(self):
self._test_max_pool_size(1, 1)
def test_max_pool_size_with_redundant_request(self):
self._test_max_pool_size(2, 1)
self._test_max_pool_size(20, 1)
def test_max_pool_size_with_leaked_request(self):
# Call start_request() but not end_request() -- when threads die, they
# should return their request sockets to the pool.
self._test_max_pool_size(1, 0)
def test_max_pool_size_with_end_request_only(self):
# Call end_request() but not start_request()
self._test_max_pool_size(0, 1)
class _TestPoolSocketSharing(_TestPoolingBase):
"""Directly test that two simultaneous operations don't share a socket. To
be run both with threads and with greenlets.
"""
def _test_pool(self, use_request):
"""
Test that the connection pool prevents both threads and greenlets from
using a socket at the same time.
Sequence:
gr0: start a slow find()
gr1: start a fast find()
gr1: get results
gr0: get results
"""
cx = get_connection(
use_greenlets=self.use_greenlets,
auto_start_request=False
)
db = cx.pymongo_test
db.test.remove(safe=True)
db.test.insert({'_id': 1}, safe=True)
history = []
def find_fast():
if use_request:
cx.start_request()
history.append('find_fast start')
# With greenlets and the old connection._Pool, this would throw
# AssertionError: "This event is already used by another
# greenlet"
self.assertEqual({'_id': 1}, db.test.find_one())
history.append('find_fast done')
if use_request:
cx.end_request()
def find_slow():
if use_request:
cx.start_request()
history.append('find_slow start')
# Javascript function that pauses N seconds per document
fn = delay(10)
if (is_mongos(db.connection) or not
version.at_least(db.connection, (1, 7, 2))):
# mongos doesn't support eval so we have to use $where
# which is less reliable in this context.
self.assertEqual(1, db.test.find({"$where": fn}).count())
else:
# 'nolock' allows find_fast to start and finish while we're
# waiting for this to complete.
self.assertEqual({'ok': 1.0, 'retval': True},
db.command('eval', fn, nolock=True))
history.append('find_slow done')
if use_request:
cx.end_request()
if self.use_greenlets:
gr0, gr1 = Greenlet(find_slow), Greenlet(find_fast)
gr0.start()
gr1.start_later(.1)
else:
gr0 = threading.Thread(target=find_slow)
gr0.setDaemon(True)
gr1 = threading.Thread(target=find_fast)
gr1.setDaemon(True)
gr0.start()
time.sleep(.1)
gr1.start()
gr0.join()
gr1.join()
self.assertEqual([
'find_slow start',
'find_fast start',
'find_fast done',
'find_slow done',
], history)
def test_pool(self):
self._test_pool(use_request=False)
def test_pool_request(self):
self._test_pool(use_request=True)
|
data_utils.py
|
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import multiprocessing as mp
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import warnings
import zipfile
from abc import abstractmethod
from contextlib import closing
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from ..utils.generic_utils import Progbar
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrive` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
# Arguments
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
            content_length = response.info().get('Content-Length')
            total_size = -1
            if content_length is not None:
                total_size = int(content_length.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).
# Returns
Path to the downloaded file
""" # noqa
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
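# A minimal, hypothetical call for illustration (the URL and hash are placeholders):
# the archive is cached under ~/.keras/datasets and, with extract=True, unpacked
# alongside the download.
# path = get_file('example.tar.gz',
#                 origin='https://example.com/example.tar.gz',
#                 extract=True,
#                 file_hash='0' * 64)  # 64 hex chars, so sha256 is assumed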
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
    if (algorithm == 'sha256') or (algorithm == 'auto'):
        # 'auto' has no expected hash to inspect here, so default to sha256.
        hasher = hashlib.sha256()
    else:
        hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
# Arguments
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
Whether the file is valid
"""
if ((algorithm == 'sha256') or
(algorithm == 'auto' and len(file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
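# Rough sketch of how _hash_file and validate_file pair up (the path is illustrative):
# compute the hash once, then reuse it for later integrity checks.
# expected = _hash_file('/tmp/example.zip')
# assert validate_file('/tmp/example.zip', expected)  # True while the file is unchanged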
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`. The method `__getitem__` should return a complete batch.
# Notes
    `Sequence` is a safer way to do multiprocessing. This structure guarantees
    that the network will only train once on each sample per epoch, which is not
    the case with generators.
# Examples
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
    # Here, `x_set` is a list of paths to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
# Arguments
index: position of the batch in the Sequence.
# Returns
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
# Returns
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide a unique id to each process.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
# Arguments
uid: int, Sequence identifier
i: index
# Returns
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
# Examples
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
    data_stream = enqueuer.get()
    for data in data_stream:
        # Use the inputs; training, evaluating, predicting.
        # ... stop sometime.
    enqueuer.close()
    ```
    `enqueuer.get()` should yield an infinite stream of data.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = mp.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Send current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
sequence: A `keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool,
initargs=(seqs,))
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except Exception:
self.stop()
six.reraise(*sys.exc_info())
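# Rough usage sketch (my_sequence, the worker count and queue size are placeholders);
# this mirrors the loop that fit_generator-style consumers run, assuming the
# Sequence yields (inputs, targets) batches.
# enqueuer = OrderedEnqueuer(my_sequence, use_multiprocessing=False, shuffle=True)
# enqueuer.start(workers=2, max_queue_size=10)
# batches = enqueuer.get()
# for _ in range(len(my_sequence)):
#     batch_x, batch_y = next(batches)
# enqueuer.stop()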
def init_pool_generator(gens, random_seed=None):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
if random_seed is not None:
ident = mp.current_process().ident
np.random.seed(random_seed + ident)
def next_sample(uid):
"""Get the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
# Arguments
uid: int, generator identifier
# Returns
The next value of generator `uid`.
"""
return six.next(_SHARED_SEQUENCES[uid])
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, sequence, use_multiprocessing=False, wait_time=None,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
self.random_seed = random_seed
if wait_time is not None:
warnings.warn('`wait_time` is not used anymore.',
DeprecationWarning)
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool_generator,
initargs=(seqs, self.random_seed))
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
list(map(lambda f: f.wait(), last_ones))
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
if 'generator already executing' in str(e):
                raise RuntimeError(
                    "Your generator is NOT thread-safe. "
                    "Keras requires a thread-safe generator when "
                    "`use_multiprocessing=False, workers > 1`. "
                    "For more information see issue #1638.")
six.reraise(*sys.exc_info())
|
05lock_solveerror.py
|
from threading import Lock
from threading import Thread
import time
# Define a global variable
num = 100
# Create a lock
lock = Lock()
def run(n):
    print('Worker thread started')
global num
global lock
for i in range(1000000):
        # Without the lock, the two threads' read-modify-write can interleave,
        # e.g. both read 100, one writes 106, the other writes 91, losing an update.
        # Acquire the lock explicitly:
        # try:
        #     lock.acquire()
        #     num = num + n
        #     num = num - n
        # finally:
        #     # Release the lock once the operation is complete
        #     lock.release()
        # A context manager acquires and releases the lock automatically.
with lock:
num = num + n
num = num - n
time.sleep(1)
# print(num)
    print('Worker thread finished')
if __name__ == '__main__':
    print('Main thread started')
t1 = Thread(target=run, args=(6,))
t2 = Thread(target=run, args=(9,))
t1.start()
t2.start()
t1.join()
t2.join()
print('num=', num)
    print('Main thread finished')
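    # With the lock held around each paired update, the two threads cannot interleave
    # the read-modify-write, so the final output is always num= 100.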
|
onnxruntime_test_python.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import onnxruntime as onnxrt
import threading
class TestInferenceSession(unittest.TestCase):
def get_name(self, name):
if os.path.exists(name):
return name
rel = os.path.join("testdata", name)
if os.path.exists(rel):
return rel
this = os.path.dirname(__file__)
data = os.path.join(this, "..", "testdata")
res = os.path.join(data, name)
if os.path.exists(res):
return res
raise FileNotFoundError(
"Unable to find '{0}' or '{1}' or '{2}'".format(name, rel, res))
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array(
[[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(self.get_name("mul_1.onnx"), sess_options=so)
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
def testGetProviders(self):
self.assertTrue(
'CPUExecutionProvider' in onnxrt.get_available_providers())
self.assertTrue('CPUExecutionProvider' in onnxrt.get_all_providers())
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testInvalidSetProviders(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
sess.set_providers(['InvalidProvider'])
self.assertTrue('[\'InvalidProvider\'] does not contain a subset of available providers' in str(
context.exception))
def testSessionProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(
self.get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array(
[[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(self.get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array(
[[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(self.get_name("matmul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(self.get_name("matmul_1.onnx"))
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:,[1,0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(
output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(
self.get_name("mul_1.onnx"), sess_options=so)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
output_expected = np.array(
[[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=np.str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
res = sess.run([], {x_name: x.tolist()})
np.testing.assert_equal(x, res[0])
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue('CPU' in device or 'GPU' in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(self.get_name("matmul_2.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ['None', 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
        # Output Y has an unknown dimension.
self.assertEqual(output_shape, ['None', 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
b = np.array([[True, False], [True, False]], dtype=np.bool)
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, 'tensor(bool)')
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
        b_type = sess.get_inputs()[1].type
self.assertEqual(b_type, 'tensor(bool)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(bool)')
output_expected = np.array(
[[True, False], [False, False]], dtype=np.bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'],
dtype=np.str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array(['Olá', '你好', '여보세요', 'hello'],
dtype=np.unicode).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'],
object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test'],
np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
expr = np.array([['this\x00\x00\x00\x00', 'is\x00\x00\x00\x00\x00\x00'],
['identity', 'test\x00\x00\x00\x00']], dtype=object)
np.testing.assert_equal(expr, res[0])
def testZipMapStringFloat(self):
sess = onnxrt.InferenceSession(
self.get_name("zipmap_stringfloat.onnx"))
x = np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0],
dtype=np.float32).reshape((2, 3))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(float)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Z")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(map(string,tensor(float)))')
output_expected = [{'class2': 0.0, 'class1': 1.0, 'class3': 3.0},
{'class2': 23.0, 'class1': 44.0, 'class3': 11.0}]
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testZipMapInt64Float(self):
sess = onnxrt.InferenceSession(self.get_name("zipmap_int64float.onnx"))
x = np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0],
dtype=np.float32).reshape((2, 3))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(float)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Z")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(map(int64,tensor(float)))')
output_expected = [{10: 1.0, 20: 0.0, 30: 3.0},
{10: 44.0, 20: 23.0, 30: 11.0}]
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
res = sess.run([], {'input:0': a})
self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path)
modelmeta = sess.get_modelmeta()
self.assertEqual('onnx-caffe2', modelmeta.producer_name)
self.assertEqual('squeezenet_old', modelmeta.graph_name)
self.assertEqual('', modelmeta.domain)
self.assertEqual('', modelmeta.description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(
self.get_name("mul_1.onnx"), sess_options=so)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {'X': x})
profile_file = sess.end_profiling()
tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue('[' in lines[0])
for i in range(1, 8):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue(']' in lines[8])
def testDictVectorizer(self):
sess = onnxrt.InferenceSession(
self.get_name("pipeline_vectorize.onnx"))
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "float_input")
input_type = str(sess.get_inputs()[0].type)
self.assertEqual(input_type, "map(int64,tensor(float))")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "variable1")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(float)")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [1, 1])
# Python type
x = {0: 25.0, 1: 5.13, 2: 0.0, 3: 0.453, 4: 5.966}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
xwrong = x.copy()
xwrong["a"] = 5.6
try:
res = sess.run([output_name], {input_name: xwrong})
except RuntimeError as e:
self.assertIn(
"Unexpected key type <class 'str'>, it cannot be linked to C type int64_t", str(e))
# numpy type
x = {np.int64(k): np.float32(v) for k, v in x.items()}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
x = {np.int64(k): np.float64(v) for k, v in x.items()}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
x = {np.int32(k): np.float64(v) for k, v in x.items()}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testLabelEncoder(self):
sess = onnxrt.InferenceSession(self.get_name("LabelEncoder.onnx"))
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "input")
input_type = str(sess.get_inputs()[0].type)
self.assertEqual(input_type, "tensor(string)")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [1, 1])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "variable")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(int64)")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [1, 1])
# Array
x = np.array([['4']])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[3]], dtype=np.int64)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
# Python type
x = np.array(['4'], ndmin=2)
res = sess.run([output_name], {input_name: x})
output_expected = np.array([3], ndmin=2, dtype=np.int64)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
        x = np.array(['4'], ndmin=2, dtype=object)
res = sess.run([output_name], {input_name: x})
output_expected = np.array([3], ndmin=2, dtype=np.int64)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def test_run_model_mlnet(self):
sess = onnxrt.InferenceSession(self.get_name("mlnet_encoder.onnx"))
names = [_.name for _ in sess.get_outputs()]
self.assertEqual(['C00', 'C12'], names)
c0 = np.array([5.], dtype=np.float32).reshape(1, 1)
c1 = np.array([b'A\0A\0', b"B\0B\0", b"C\0C\0"], np.void).reshape(1, 3)
res = sess.run(None, {'C0': c0, 'C1': c1})
mat = res[1]
total = mat.sum()
self.assertEqual(total, 2)
self.assertEqual(list(mat.ravel()),
list(np.array([[[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 1., 0.]]]).ravel()))
        # In memory, the size of each element is fixed and equal to the
        # longest element. We cannot use bytes because numpy trims every
        # trailing 0 from strings and bytes before creating the array
        # (to save space). It does not do this for void, but as a result
        # numpy no longer knows the length of each element: they all
        # share the same size.
c1 = np.array([b'A\0A\0\0', b"B\0B\0", b"C\0C\0"],
np.void).reshape(1, 3)
res = sess.run(None, {'C0': c0, 'C1': c1})
mat = res[1]
total = mat.sum()
self.assertEqual(total, 0)
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
        # default should be ORT_ENABLE_ALL (all optimizations)
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"), sess_options=opt)
        a = np.array([[True, True], [False, False]], dtype=bool)
        b = np.array([[True, False], [True, False]], dtype=bool)
        res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(self.get_name("sequence_length.onnx"))
x = [np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'seq(tensor(float))')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(int64)')
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(
self.get_name("sequence_construct.onnx"))
self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))})
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(self.get_name("sequence_insert.onnx"), sess_options=opt)
self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {"tensor": np.array(
[1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)), "input_seq": []})
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
def testLoadingSessionOptionsFromModel(self):
try:
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1)
sess = onnxrt.InferenceSession(self.get_name("model_with_valid_ort_config_json.onnx"))
session_options = sess.get_session_options()
self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config
self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config
self.assertEqual(session_options.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL) # default option (not from the ORT config)
self.assertEqual(session_options.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) # from the ORT config
self.assertEqual(session_options.enable_profiling, True) # from the ORT config
except Exception:
raise
finally:
# Make sure the usage of the feature is disabled after this test
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0)
if __name__ == '__main__':
unittest.main()
|
chat.py
|
# ██╗ ██╗██████╗ ███╗ ███╗███████╗ █████╗ ██╗
# ██║ ██║██╔══██╗████╗ ████║██╔════╝██╔══██╗██║
# ███████║██║ ██║██╔████╔██║█████╗ ███████║██║
# ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ ██╔══██║██║
# ██║ ██║██████╔╝██║ ╚═╝ ██║███████╗██║ ██║███████╗
# ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝
# Copyright 2019-2020, Hyungyo Seo
# chat.py - Script that builds the Skill response data.
import datetime
import hashlib
import time
from itertools import groupby
from threading import Thread
from modules.chatbot import user
from modules.common import security, conf, log, get_data
from modules.common.parsers import league_of_legends_parser
# Build the JSON for Skill responses
def skill(msg):
return {'version': '2.0',
'data': {
'msg': msg
}
}
def skill_simpletext(msg):
return {'version': '2.0',
'template': {
'outputs': [
{
'simpleText': {
'text': msg
}
}
]
}
}
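# Illustrative sketch (not part of the original script): the helpers above wrap
# a message in the skill payload shape the web layer serializes, e.g.
#     skill_simpletext("안녕하세요") returns
#     {'version': '2.0',
#      'template': {'outputs': [{'simpleText': {'text': '안녕하세요'}}]}}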
# Weekday handling (Korean day-of-week label)
def wday(date):
if date.weekday() == 0:
return "월"
elif date.weekday() == 1:
return "화"
elif date.weekday() == 2:
return "수"
elif date.weekday() == 3:
return "목"
elif date.weekday() == 4:
return "금"
elif date.weekday() == 5:
return "토"
else:
return "일"
# Allergy information (indexed by allergen code)
allergy_string = ["", "난류", "우유", "메밀", "땅콩", "대두", "밀", "고등어", "게", "새우", "돼지고기", "복숭아",
"토마토", "아황산류", "호두", "닭고기", "쇠고기", "오징어", "조개류"]
def getuserid(uid):
enc = hashlib.sha256()
enc.update(uid.encode("utf-8"))
return 'KT-' + enc.hexdigest()
def router(platform: str, uid: str, intent: str, params: dict, req_id: str, debugging: bool):
try:
if 'Briefing' in intent:
return briefing(uid, req_id, debugging)
elif 'Meal' in intent:
return meal(uid, params, req_id, debugging)
elif 'Timetable' in intent:
return timetable(platform, uid, params, req_id, debugging)
elif 'Schedule' in intent:
return schdl(params, req_id, debugging)
elif 'WaterTemperature' in intent:
return [get_data.wtemp(req_id, debugging)], None
elif 'UserSettings' in intent:
return user_settings(uid, req_id)
elif 'ModifyUserInfo' in intent:
return modify_user_info(params, uid, req_id, debugging)
elif 'LoL' in intent:
return lol(params, req_id, debugging)
else:
return ["잘못된 요청입니다.\n요청 ID: " + req_id], None
except OSError as e:
log.err("[#%s] router@chat.py: Uncaught Error %s" % (req_id, e))
return ["알 수 없는 오류가 발생했습니다.\n요청 ID: " + req_id], None
# Meal lookup
def meal(uid: str, params: dict, req_id: str, debugging: bool):
try:
if not params['date']:
return ["언제의 급식을 조회하시겠어요?"], None
if isinstance(params['date'], datetime.datetime):
date: datetime = params['date']
            if date.weekday() >= 5:  # weekend
return ["급식을 실시하지 않습니다. (주말)"], None
meal = get_data.meal(date.year, date.month, date.day, req_id, debugging)
if "message" not in meal: # 파서 메시지 있는지 확인, 없으면 만들어서 응답
# 사용자 설정 불러오기
user_preferences = user.get_user(uid, req_id, debugging)[2]
if user_preferences.get('AllergyInfo') == 'None':
menus = [i[0] for i in meal["menu"]]
elif user_preferences.get('AllergyInfo') == 'FullText':
menus = []
for i in meal["menu"]:
if i[1]:
menus.append('%s(%s)' % (i[0], ', '.join(allergy_string[x] for x in i[1])))
else:
menus.append(i[0])
else:
menus = []
for i in meal["menu"]:
if i[1]:
menus.append('%s(%s)' % (i[0], ', '.join(str(x) for x in i[1])))
else:
menus.append(i[0])
return ["%s:\n%s\n\n열량: %s kcal" % (meal["date"], '\n'.join(menus), meal["kcal"])], None
if meal["message"] == "등록된 데이터가 없습니다.":
cal = get_data.schdl(date.year, date.month, date.day, req_id, debugging)
if not cal == "일정이 없습니다.":
return ["급식을 실시하지 않습니다. (%s)" % cal], None
return [meal["message"]], None
else:
return ["정확한 날짜를 입력해주세요.\n현재 식단조회에서는 여러날짜 조회를 지원하지 않습니다."], None
except ConnectionError:
return ["급식 서버에 연결하지 못했습니다.\n요청 ID: " + req_id], None
# Timetable lookup
def timetable(platform: str, uid: str, params: dict, req_id: str, debugging: bool):
suggest_to_register = False
try:
log.info("[#%s] tt_registered@chat.py: New Request" % req_id)
print(params)
if 'grade' in params and 'class' in params and params['grade'] and params['class']:
try:
tt_grade = int(params['grade'])
tt_class = int(params['class'])
except ValueError:
return ["올바른 숫자를 입력해 주세요."], None
if platform == 'KT':
suggest_to_register = True
else:
            user_data = user.get_user(uid, req_id, debugging)  # load user info
tt_grade = user_data[0]
tt_class = user_data[1]
if not tt_grade or not tt_class:
if platform == 'KT':
return [{
"type": "card",
"title": "사용자 정보를 찾을 수 없습니다.",
"body": '"내 정보 관리"를 눌러 학년/반 정보를 등록 하시거나, '
'"1학년 1반 시간표 알려줘"와 같이 조회할 학년/반을 직접 언급해 주세요.',
"buttons": [
{
"type": "message",
"title": "내 정보 관리"
}
]
}], None
else:
return ['사용자 정보를 찾을 수 없습니다. "내 정보 관리"를 눌러 학년/반 정보를 등록해 주세요.'], None
if not params['date']:
return ["언제의 시간표를 조회하시겠어요?"], None
if isinstance(params['date'], datetime.datetime):
date: datetime = params['date']
if suggest_to_register:
return [get_data.tt(tt_grade, tt_class, date, req_id, debugging), {
"type": "card",
"title": "방금 입력하신 정보를 저장할까요?",
"body": "학년/반 정보를 등록하시면 다음부터 더 빠르고 편하게 이용하실 수 있습니다.",
"buttons": [
{
"type": "message",
"title": "네, 저장해 주세요.",
"postback": "사용자 정보 등록: %d학년 %d반" % (tt_grade, tt_class)
}
]
}], None
else:
return [get_data.tt(tt_grade, tt_class, date, req_id, debugging)], None
else:
return ["정확한 날짜를 입력해주세요.\n현재 시간표조회에서는 여러날짜 조회를 지원하지 않습니다."], None
except ConnectionError:
return ["시간표 서버에 연결하지 못했습니다.\n요청 ID: " + req_id], None
# School calendar lookup
def schdl(params: dict, req_id: str, debugging: bool):
global msg
try:
log.info("[#%s] cal@chat.py: New Request" % req_id)
if "date" in params:
if not params['date']:
return ["언제의 학사일정을 조회하시겠어요?"], None
            # single-date lookup
if isinstance(params['date'], datetime.datetime):
try:
date: datetime = params["date"]
except Exception:
log.err("[#%s] cal@chat.py: Error while Parsing Date" % req_id)
return ["오류가 발생했습니다.\n요청 ID: " + req_id], None
                prsnt_schdl = get_data.schdl(date.year, date.month, date.day, req_id, debugging)
if prsnt_schdl:
msg = "%s-%s-%s(%s):\n%s" % (
str(date.year).zfill(4), str(date.month).zfill(2), str(date.day).zfill(2),
wday(date), prsnt_schdl) # YYYY-MM-DD(Weekday)
else:
msg = "일정이 없습니다."
            # end of single-date lookup
            # date-range lookup
            elif isinstance(params['date'], list):  # date range
body = str()
try:
                    start: datetime = params['date'][0]  # parse the start date
except Exception:
log.err("[#%s] cal@chat.py: Error while Parsing StartDate" % req_id)
return ["오류가 발생했습니다.\n요청 ID: " + req_id], None
try:
                    end: datetime = params['date'][1]  # parse the end date
except Exception:
log.err("[#%s] cal@chat.py: Error while Parsing EndDate" % req_id)
return ["오류가 발생했습니다.\n요청 ID: " + req_id], None
                if (end - start).days > 90:  # more than 90 days requested
head = ("서버 성능상의 이유로 최대 90일까지만 조회가 가능합니다."
"\n조회기간이 %s부터 %s까지로 제한되었습니다.\n\n" %
(start.date(), (start + datetime.timedelta(days=90)).date()))
                    end = start + datetime.timedelta(days=90)  # pull the end date forward
else:
head = "%s부터 %s까지 조회합니다.\n\n" % (start.date(), end.date())
schdls = get_data.schdl_mass(start, end, req_id, debugging)
                # returned as a list of (year, month, day, schedule) tuples
                # build the body; consecutive identical events are grouped together
for content, group in groupby(schdls, lambda k: k[3]):
lst = [*group]
if lst[0] != lst[-1]:
start_date = datetime.date(*lst[0][:3])
end_date = datetime.date(*lst[-1][:3])
body = '%s%s(%s)~%s(%s):\n%s\n' % (
body, start_date, wday(start_date), end_date, wday(end_date), content)
else:
date = datetime.date(*lst[0][:3])
body = '%s%s(%s):\n%s\n' % (body, date, wday(date), content)
if not body:
body = "일정이 없습니다.\n"
                msg = (head + body)[:-1]  # strip the trailing newline
            # end of date-range lookup
        else:  # no date parameter was passed at all
log.info("[#%s] cal@chat.py: No Parameter" % req_id)
return ["언제의 학사일정을 조회하시겠어요?"], None
return [msg], None
except ConnectionError:
return ["학사일정 서버에 연결하지 못했습니다.\n요청 ID: " + req_id], None
# Meal-bot briefing
def briefing(uid: str, req_id: str, debugging: bool):
log.info("[#%s] briefing@chat.py: New Request" % req_id)
global briefing_header, hd_err, briefing_schdl, briefing_weather, briefing_meal, briefing_meal_ga, briefing_tt
briefing_header = "알 수 없는 오류로 헤더를 불러올 수 없었습니다.\n나중에 다시 시도해 보세요."
briefing_schdl = "알 수 없는 오류로 학사일정을 불러올 수 없었습니다.\n나중에 다시 시도해 보세요."
briefing_weather = "알 수 없는 오류로 날씨를 불러올 수 없었습니다.\n나중에 다시 시도해 보세요."
briefing_meal = "알 수 없는 오류로 식단을 불러올 수 없었습니다.\n나중에 다시 시도해 보세요."
briefing_meal_ga = "알 수 없는 오류로 식단을 불러올 수 없었습니다.\n나중에 다시 시도해 보세요."
briefing_tt = "알 수 없는 오류로 시간표를 불러올 수 없었습니다.\n나중에 다시 시도해 보세요."
    if datetime.datetime.now().time() >= datetime.time(17):  # after 5 PM
        # use tomorrow as the reference date
date = datetime.datetime.now() + datetime.timedelta(days=1)
date_ko = "내일"
    else:  # before 5 PM
        # use today as the reference date
date = datetime.datetime.now()
date_ko = "오늘"
log.info("[#%s] briefing@chat.py: Date: %s" % (req_id, date))
    # Decorator: measures and logs each task's execution time when debugging.
    def logging_time(original_fn):
        def wrapper_fn(*args, **kwargs):
            start_time = time.time()
            result = original_fn(*args, **kwargs)
            end_time = time.time()
            if debugging:
                print("{} 실행.".format(original_fn.__name__))
                print("{} 종료. 실행시간: {} 초".format(original_fn.__name__, end_time - start_time))
            return result
        return wrapper_fn
    # First chat bubble
    # Header
@logging_time
def f_header():
global briefing_header, hd_err
        if date.weekday() >= 5:  # if it is a weekend
log.info("[#%s] briefing@chat.py: Weekend" % req_id)
hd_err = "%s은 주말 입니다." % date_ko
else:
briefing_header = "%s은 %s(%s) 입니다." % (date_ko, date.date().isoformat(), wday(date))
hd_err = None
    # School calendar
@logging_time
def f_cal():
global briefing_schdl
try:
briefing_schdl = get_data.schdl(date.year, date.month, date.day, req_id, debugging)
if not briefing_schdl:
log.info("[#%s] briefing@chat.py: No Schedule" % req_id)
briefing_schdl = "%s은 학사일정이 없습니다." % date_ko
else:
briefing_schdl = "%s 학사일정:\n%s" % (date_ko, briefing_schdl)
except ConnectionError:
briefing_schdl = "학사일정 서버에 연결하지 못했습니다.\n나중에 다시 시도해 보세요."
    # Second chat bubble
    # Weather
@logging_time
def f_weather():
global briefing_weather
try:
briefing_weather = get_data.weather(date_ko, req_id, debugging)
except ConnectionError:
briefing_weather = "날씨 서버에 연결하지 못했습니다.\n나중에 다시 시도해 보세요."
    # Third chat bubble
    # Meal
@logging_time
def f_meal():
global briefing_meal, briefing_meal_ga
try:
meal = get_data.meal(date.year, date.month, date.day, req_id, debugging)
if not "message" in meal: # 파서 메시지 있는지 확인, 없으면 만들어서 응답
briefing_meal_ga = "%s 급식은 %s 입니다." % (
date_ko, ', '.join(i[0] for i in meal["menu"]).replace('⭐', ''))
briefing_meal = "%s 급식:\n%s" % (date_ko, '\n'.join(i[0] for i in meal["menu"]))
elif meal["message"] == "등록된 데이터가 없습니다.":
log.info("[#%s] briefing@chat.py: No Meal" % req_id)
briefing_meal_ga = date_ko + "은 급식을 실시하지 않습니다."
briefing_meal = date_ko + "은 급식을 실시하지 않습니다."
except ConnectionError:
briefing_meal_ga = "급식 서버에 연결하지 못했습니다.\n나중에 다시 시도해 보세요."
briefing_meal = "급식 서버에 연결하지 못했습니다.\n나중에 다시 시도해 보세요."
    # Timetable
@logging_time
def f_tt():
global briefing_tt
try:
            user_data = user.get_user(uid, req_id, debugging)  # load user info
tt_grade = user_data[0]
tt_class = user_data[1]
            if tt_grade is not None or tt_class is not None:  # when user info exists
tt = get_data.tt(tt_grade, tt_class, date, req_id, debugging)
if tt == "등록된 데이터가 없습니다.":
briefing_tt = "등록된 시간표가 없습니다."
else:
briefing_tt = "%s 시간표:\n%s" % (date_ko, tt.split('):\n')[1]) # 헤더부분 제거
else:
log.info("[#%s] briefing@chat.py: Non-Registered User" % req_id)
briefing_tt = "등록된 사용자만 시간표를 볼 수 있습니다."
except ConnectionError:
briefing_tt = "시간표 서버에 연결하지 못했습니다.\n나중에 다시 시도해 보세요."
except Exception as e:
log.err("[#%s] briefing@chat.py: Failed to Fetch Timetable because %s" % (req_id, e))
    # Define the threads
th_header = Thread(target=f_header)
th_cal = Thread(target=f_cal)
th_weather = Thread(target=f_weather)
th_meal = Thread(target=f_meal)
th_tt = Thread(target=f_tt)
    # Start the threads
th_header.start()
th_cal.start()
th_weather.start()
th_meal.start()
th_tt.start()
    # Wait until every thread has finished
th_header.join()
if hd_err:
return [hd_err], None, '안녕하세요, 흥덕고 급식입니다.\n' + hd_err
th_cal.join()
th_weather.join()
th_meal.join()
th_tt.join()
    # Google Assistant response
ga_respns = '안녕하세요, 흥덕고 급식입니다.\n' + briefing_meal_ga
    # Build the response
return ["%s\n\n%s" % (briefing_header, briefing_schdl), briefing_weather,
"%s\n\n%s" % (briefing_meal, briefing_tt)], None, ga_respns
def lol(params, req_id, debugging):
log.info("[#%s] lol@chat.py: New Request" % req_id)
try:
summoner_name = params["summonerName"]
except KeyError:
return ["소환사명을 입력해주세요."], None
except Exception:
log.err("[#%s] lol@chat.py: Error while Parsing Summoner Name" % req_id)
return ["오류가 발생했습니다.\n요청 ID: " + req_id], None
    # if the summoner name exceeds 16 characters
if len(summoner_name) > 16:
log.info("[#%s] lol@chat.py: Summoner Name is Too Long" % req_id)
return [{
"type": "card",
"title": "소환사명이 너무 김",
"body": "소환사명이 너무 깁니다.\n"
"소환사명은 영문 16자, 한글 8자 이내입니다.\n"
"잘못 입력하진 않았는지 확인해주세요.",
}], None
try:
summoner_data = league_of_legends_parser.parse(summoner_name, req_id, debugging)
except Exception as e:
if "timed out" in str(e):
return ["라이엇 서버에 연결하지 못했습니다.\n요청 ID: " + req_id], None
log.err("[#%s] lol@chat.py: Error while Parsing Summoner Data because %s" % (req_id, e))
return ["오류가 발생했습니다.\n요청 ID: " + req_id], None
if summoner_data == 'Invalid Token':
log.err("[#%s] lol@chat.py: Invalid Token" % req_id)
return ["오류가 발생했습니다.\n요청 ID: " + req_id], None
if summoner_data:
        # solo-ranked record
if summoner_data["ranked_solo"]:
solo = ("솔랭 전적:\n"
"%s %s (%s LP)\n"
"%s승 %s패 (%s%%)\n\n" %
(summoner_data["ranked_solo"]["tier"], summoner_data["ranked_solo"]["rank"],
summoner_data["ranked_solo"]["leaguePoints"],
summoner_data["ranked_solo"]["wins"],
summoner_data["ranked_solo"]["losses"],
summoner_data["ranked_solo"]["winningRate"]))
else:
solo = "솔랭 전적이 없습니다. 분발하세요!\n\n"
        # flex-ranked record
if summoner_data["ranked_flex"]:
flex = ("자유랭 전적:\n"
"%s %s (%s LP)\n"
"%s승 %s패 (%s%%)\n\n" %
(summoner_data["ranked_flex"]["tier"], summoner_data["ranked_flex"]["rank"],
summoner_data["ranked_flex"]["leaguePoints"],
summoner_data["ranked_flex"]["wins"],
summoner_data["ranked_flex"]["losses"],
summoner_data["ranked_flex"]["winningRate"]))
else:
flex = "자유랭 전적이 없습니다. 분발하세요!\n\n"
        # build statistics
if summoner_data["games"]:
if summoner_data["preferredLane"]:
preferred_lane = "%s(%s%%)" % (summoner_data["preferredLane"][0], summoner_data["preferredLane"][1])
else:
preferred_lane = "정보없음"
if summoner_data["preferredChampion"]:
preferred_champion = ("%s(%s%%)" %
(summoner_data["preferredChampion"][0], summoner_data["preferredChampion"][1]))
else:
preferred_champion = "정보없음"
preferences = ("최근 %s번의 매치를 바탕으로 분석한 결과입니다:\n"
"많이한 라인: %s\n"
"많이한 챔피언: %s") % (summoner_data["games"], preferred_lane, preferred_champion)
else:
preferences = "통계가 없습니다. 분발하세요!"
return [{
"type": "card",
"title": "%s (레벨 %s)" % (summoner_data["summoner"]["name"], summoner_data["summoner"]["level"]),
"body": solo + flex + preferences,
'image': summoner_data["summoner"]["profileIcon"],
"buttons": [
{
"type": "web",
"title": "OP.GG에서 보기",
"url": summoner_data["summoner"]["OPGG"]
},
{
"type": "message",
"title": "다른 소환사 검색하기"
}
]
}], None
else:
return [{
"type": "card",
"title": "소환사를 찾을 수 없음",
"body": summoner_name + " 소환사를 찾을 수 없습니다.\n"
"한국 서버에 등록된 소환사가 맞는지, "
"잘못 입력하진 않았는지 확인해주세요.",
"buttons": [
{
"type": "message",
"title": "다시 검색하기"
}
]
}], None
def user_settings(uid: str, req_id: str):
url = conf.configs['Misc']['Settings']['BaseURL']
return [{
"type": "card",
"title": "내 정보 관리",
"body": "아래 버튼을 클릭해 관리 페이지로 접속해 주세요.\n"
"링크는 10분 뒤 만료됩니다.",
"buttons": [
{
"type": "web",
"title": "내 정보 관리",
"url": url + "?token=" + security.generate_token('UserSettings', uid,
['GetUserInfo', 'ManageUserInfo', 'GetUsageData',
'DeleteUsageData'], req_id)
}
]
}], None
def modify_user_info(params: dict, uid: str, req_id: str, debugging: bool):
try:
user.manage_user(uid, int(params['grade']), int(params['class']), {}, req_id, debugging)
except KeyError:
return ["변경할 학년/반 정보를 입력해 주세요."], None
except ValueError:
return ["올바른 숫자를 입력해 주세요."], None
return ["저장되었습니다."], None
# Debug
if __name__ == "__main__":
log.init()
|
Battery_notification.py
|
# use terminal to install
"""pip install psutil
pip install pyttsx3
pip install win10toast"""
import psutil
import time
import pyttsx3
from win10toast import ToastNotifier # also need to install win32api
import threading
toaster = ToastNotifier()
x = pyttsx3.init()
x.setProperty('rate', 110)
x.setProperty('volume', 3)
count = 0
def show_notification(show_text):
toaster.show_toast(show_text,
icon_path='battery_indicator.ico',
duration=10)
# loop the toaster over some period of time
while toaster.notification_active():
time.sleep(0.005)
def monitor():
    while True:
time.sleep(1)
battery = psutil.sensors_battery()
plugged = battery.power_plugged
percent = int(battery.percent)
if percent < 35:
            if not plugged:
processThread = threading.Thread(target=show_notification, args=("Your Battery at "+str(percent)+"% Please plug the cable",)) # <- note extra ','
processThread.start()
x.say("Your battery is getting low so charge it right now")
x.runAndWait()
elif percent >= 98:
            if plugged:
processThread = threading.Thread(target=show_notification, args=("Charging is getting complete",)) # <- note extra ','
processThread.start()
x.say("Charging is getting complete")
x.runAndWait()
if __name__ == "__main__":
monitor()
|
system_controller.py
|
import alsaaudio
import pulsectl
import threading
from config import TEST_ENV
import pydbus
import time
from config import logger
class SystemController():
def __init__(self):
self.system_volume = alsaaudio.Mixer().getvolume()[0]
def get_volume(self):
return self.system_volume
def set_volume(self, vol):
th = threading.Thread(target=self.__set_system_volume, args=(vol,))
th.start()
self.system_volume = vol
def __set_system_volume(self, vol):
m = alsaaudio.Mixer()
m.setvolume(vol)
class Audioctl():
def __init__(self):
self.pulse = pulsectl.Pulse('my-client-name')
def get_audio_output_devices(self):
result = self.pulse.sink_list()
output_devices = []
for path in result:
output_devices.append({'name': path.description, 'index' : path.index, 'connected' : True})
return output_devices
def select(self, device):
result = self.pulse.sink_input_list()
for path in result:
            self.pulse.sink_input_move(path.index, device['index'])
class Bluetoothctl():
def __init__(self):
self.bluez_service = 'org.bluez'
self.adapter_path = '/org/bluez/hci0'
self.bus = pydbus.SystemBus()
self.adapter = self.bus.get(self.bluez_service, self.adapter_path)
self.mngr = self.bus.get(self.bluez_service, '/')
def get_paired_devices(self):
return self.get_devices('Paired')
def get_connected_devices(self):
return self.get_devices('Connected')
def get_devices(self, filter):
mngd_objs = self.mngr.GetManagedObjects()
paired_devices = []
for path in mngd_objs:
con_state = mngd_objs[path].get('org.bluez.Device1', {}).get(filter, False)
if con_state:
addr = mngd_objs[path].get('org.bluez.Device1', {}).get('Address')
icon = mngd_objs[path].get('org.bluez.Device1', {}).get('Icon')
connected = mngd_objs[path].get('org.bluez.Device1', {}).get('Connected')
name = ('☑ ' if connected else '☐ ') + mngd_objs[path].get('org.bluez.Device1', {}).get('Name')
paired_devices.append({'name': name, 'mac_address' : addr, 'icon' : icon, 'connected' : connected})
return paired_devices
    def toggle(self, device):
        if device['connected']:
            logger.debug(device['name'] + " was connected. Disconnecting")
            return self.disconnect(device['mac_address'])
        else:
            logger.debug(device['name'] + " was disconnected. Connecting")
            return self.connect(device['mac_address'])
def disconnect(self, mac_address):
device_path = f"{self.adapter_path}/dev_{mac_address.replace(':', '_')}"
device = self.bus.get(self.bluez_service, device_path)
device.Disconnect()
def connect(self, mac_address):
device_path = f"{self.adapter_path}/dev_{mac_address.replace(':', '_')}"
device = self.bus.get(self.bluez_service, device_path)
device.Connect()
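# Usage sketch (illustrative only; assumes a BlueZ adapter at hci0 and at least
# one paired device):
#     bt = Bluetoothctl()
#     for dev in bt.get_paired_devices():
#         print(dev['name'], dev['mac_address'], dev['connected'])
#     bt.toggle(bt.get_paired_devices()[0])  # connect or disconnect it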
|
xvfb.py
|
#!/usr/bin/env vpython
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests with Xvfb and Openbox or Weston on Linux and normally on other
platforms."""
from __future__ import print_function
import copy
import os
import os.path
import psutil
import random
import re
import signal
import subprocess
import sys
import threading
import time
import test_env
class _XvfbProcessError(Exception):
"""Exception raised when Xvfb cannot start."""
pass
class _WestonProcessError(Exception):
"""Exception raised when Weston cannot start."""
pass
def kill(proc, name, timeout_in_seconds=10):
"""Tries to kill |proc| gracefully with a timeout for each signal."""
if not proc:
return
proc.terminate()
thread = threading.Thread(target=proc.wait)
thread.start()
thread.join(timeout_in_seconds)
if thread.is_alive():
print('%s running after SIGTERM, trying SIGKILL.\n' % name, file=sys.stderr)
proc.kill()
thread.join(timeout_in_seconds)
if thread.is_alive():
print('%s running after SIGTERM and SIGKILL; good luck!\n' % name,
file=sys.stderr)
def launch_dbus(env):
"""Starts a DBus session.
Works around a bug in GLib where it performs operations which aren't
async-signal-safe (in particular, memory allocations) between fork and exec
when it spawns subprocesses. This causes threads inside Chrome's browser and
utility processes to get stuck, and this harness to hang waiting for those
processes, which will never terminate. This doesn't happen on users'
machines, because they have an active desktop session and the
DBUS_SESSION_BUS_ADDRESS environment variable set, but it can happen on
headless environments. This is fixed by glib commit [1], but this workaround
will be necessary until the fix rolls into Chromium's CI.
[1] f2917459f745bebf931bccd5cc2c33aa81ef4d12
Modifies the passed in environment with at least DBUS_SESSION_BUS_ADDRESS and
DBUS_SESSION_BUS_PID set.
Returns the pid of the dbus-daemon if started, or None otherwise.
"""
if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
return
try:
dbus_output = subprocess.check_output(
['dbus-launch'], env=env).decode('utf-8').split('\n')
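    # dbus-launch typically prints KEY=VALUE lines such as
    #   DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-XXXXXX,guid=...
    #   DBUS_SESSION_BUS_PID=12345
    # which the loop below copies into the passed-in env.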
for line in dbus_output:
m = re.match(r'([^=]+)\=(.+)', line)
if m:
env[m.group(1)] = m.group(2)
return int(env['DBUS_SESSION_BUS_PID'])
except (subprocess.CalledProcessError, OSError, KeyError, ValueError) as e:
print('Exception while running dbus_launch: %s' % e)
# TODO(crbug.com/949194): Encourage setting flags to False.
def run_executable(
cmd, env, stdoutfile=None, use_openbox=True, use_xcompmgr=True):
"""Runs an executable within Weston or Xvfb on Linux or normally on other
platforms.
The method sets SIGUSR1 handler for Xvfb to return SIGUSR1
when it is ready for connections.
https://www.x.org/archive/X11R7.5/doc/man/man1/Xserver.1.html under Signals.
Args:
cmd: Command to be executed.
    env: A copy of environment variables. "DISPLAY" will be set if Xvfb is
      used. "WAYLAND_DISPLAY" will be set if Weston is used.
stdoutfile: If provided, symbolization via script is disabled and stdout
is written to this file as well as to stdout.
use_openbox: A flag to use openbox process.
Some ChromeOS tests need a window manager.
use_xcompmgr: A flag to use xcompmgr process.
Some tests need a compositing wm to make use of transparent visuals.
Returns:
the exit code of the specified commandline, or 1 on failure.
"""
# It might seem counterintuitive to support a --no-xvfb flag in a script
# whose only job is to start xvfb, but doing so allows us to consolidate
# the logic in the layers of buildbot scripts so that we *always* use
# xvfb by default and don't have to worry about the distinction, it
# can remain solely under the control of the test invocation itself.
use_xvfb = True
if '--no-xvfb' in cmd:
use_xvfb = False
cmd.remove('--no-xvfb')
# Tests that run on Linux platforms with Ozone/Wayland backend require
# a Weston instance. However, it is also required to disable xvfb so
# that Weston can run in a pure headless environment.
use_weston = False
if '--use-weston' in cmd:
if use_xvfb:
print('Unable to use Weston with xvfb.\n', file=sys.stderr)
return 1
use_weston = True
cmd.remove('--use-weston')
if sys.platform.startswith('linux') and use_xvfb:
return _run_with_xvfb(cmd, env, stdoutfile, use_openbox, use_xcompmgr)
elif use_weston:
return _run_with_weston(cmd, env, stdoutfile)
else:
return test_env.run_executable(cmd, env, stdoutfile)
def _run_with_xvfb(cmd, env, stdoutfile, use_openbox, use_xcompmgr):
openbox_proc = None
xcompmgr_proc = None
xvfb_proc = None
xwmstartupcheck_proc = None
xvfb_ready = MutableBoolean()
def set_xvfb_ready(*_):
xvfb_ready.setvalue(True)
dbus_pid = None
try:
signal.signal(signal.SIGTERM, raise_xvfb_error)
signal.signal(signal.SIGINT, raise_xvfb_error)
# Before [1], the maximum number of X11 clients was 256. After, the default
# limit is 256 with a configurable maximum of 512. On systems with a large
# number of CPUs, the old limit of 256 may be hit for certain test suites
# [2] [3], so we set the limit to 512 when possible. This flag is not
# available on Ubuntu 16.04 or 18.04, so a feature check is required. Xvfb
# does not have a '-version' option, so checking the '-help' output is
# required.
#
# [1] d206c240c0b85c4da44f073d6e9a692afb6b96d2
# [2] https://crbug.com/1187948
# [3] https://crbug.com/1120107
xvfb_help = subprocess.check_output(
['Xvfb', '-help'], stderr=subprocess.STDOUT).decode('utf8')
# Due to race condition for display number, Xvfb might fail to run.
# If it does fail, try again up to 10 times, similarly to xvfb-run.
for _ in range(10):
xvfb_ready.setvalue(False)
display = find_display()
xvfb_cmd = ['Xvfb', display, '-screen', '0', '1280x800x24', '-ac',
'-nolisten', 'tcp', '-dpi', '96', '+extension', 'RANDR']
if '-maxclients' in xvfb_help:
xvfb_cmd += ['-maxclients', '512']
# Sets SIGUSR1 to ignore for Xvfb to signal current process
# when it is ready. Due to race condition, USR1 signal could be sent
# before the process resets the signal handler, we cannot rely on
# signal handler to change on time.
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
xvfb_proc = subprocess.Popen(xvfb_cmd, stderr=subprocess.STDOUT, env=env)
signal.signal(signal.SIGUSR1, set_xvfb_ready)
for _ in range(10):
time.sleep(.1) # gives Xvfb time to start or fail.
if xvfb_ready.getvalue() or xvfb_proc.poll() is not None:
break # xvfb sent ready signal, or already failed and stopped.
if xvfb_proc.poll() is None:
break # xvfb is running, can proceed.
if xvfb_proc.poll() is not None:
raise _XvfbProcessError('Failed to start after 10 tries')
env['DISPLAY'] = display
dbus_pid = launch_dbus(env)
if use_openbox:
      # This is not ideal, but x11_unittests require it (other X11 tests have
      # a race with openbox as well, but they take more time to initialize and
      # thus do not time out, compared to the x11_unittests, which are quick
      # enough to start up before openbox is ready).
      # TODO(dpranke): remove this nasty hack once the test() template is
      # reworked.
wait_for_openbox = False
wait_openbox_program = './xwmstartupcheck'
if not os.path.isfile(wait_openbox_program):
wait_for_openbox = False
# Creates a dummy window that waits for a ReparentNotify event that is
# sent whenever Openbox WM starts. Must be started before the OpenBox WM
# so that it does not miss the event. This helper program is located in
# the current build directory. The program terminates automatically after
# 30 seconds of waiting for the event.
if wait_for_openbox:
xwmstartupcheck_proc = subprocess.Popen(
wait_openbox_program, stderr=subprocess.STDOUT, env=env)
openbox_proc = subprocess.Popen(
['openbox', '--sm-disable'], stderr=subprocess.STDOUT, env=env)
# Wait until execution is done. Does not block if the process has already
# been terminated. In that case, it's safe to read the return value.
if wait_for_openbox:
xwmstartupcheck_proc.wait()
if xwmstartupcheck_proc.returncode != 0:
raise _XvfbProcessError('Failed to get OpenBox up.')
if use_xcompmgr:
xcompmgr_proc = subprocess.Popen(
'xcompmgr', stderr=subprocess.STDOUT, env=env)
return test_env.run_executable(cmd, env, stdoutfile)
except OSError as e:
print('Failed to start Xvfb or Openbox: %s\n' % str(e), file=sys.stderr)
return 1
except _XvfbProcessError as e:
print('Xvfb fail: %s\n' % str(e), file=sys.stderr)
return 1
finally:
kill(openbox_proc, 'openbox')
kill(xcompmgr_proc, 'xcompmgr')
kill(xvfb_proc, 'Xvfb')
# dbus-daemon is not a subprocess, so we can't SIGTERM+waitpid() on it.
# To ensure it exits, use SIGKILL which should be safe since all other
# processes that it would have been servicing have exited.
if dbus_pid:
os.kill(dbus_pid, signal.SIGKILL)
# TODO(https://crbug.com/1060466): Write tests.
def _run_with_weston(cmd, env, stdoutfile):
weston_proc = None
try:
signal.signal(signal.SIGTERM, raise_weston_error)
signal.signal(signal.SIGINT, raise_weston_error)
dbus_pid = launch_dbus(env)
# The bundled weston (//third_party/weston) is used by Linux Ozone Wayland
# CI and CQ testers and compiled by //ui/ozone/platform/wayland whenever
# there is a dependency on the Ozone/Wayland and use_bundled_weston is set
# in gn args. However, some tests do not require Wayland or do not use
# //ui/ozone at all, but still have --use-weston flag set by the
# OZONE_WAYLAND variant (see //testing/buildbot/variants.pyl). This results
# in failures and those tests cannot be run because of the exception that
# informs about missing weston binary. Thus, to overcome the issue before
# a better solution is found, add a check for the "weston" binary here and
# run tests without Wayland compositor if the weston binary is not found.
# TODO(https://1178788): find a better solution.
if not os.path.isfile("./weston"):
print('Weston is not available. Starting without Wayland compositor')
return test_env.run_executable(cmd, env, stdoutfile)
# Set $XDG_RUNTIME_DIR if it is not set.
_set_xdg_runtime_dir(env)
# Weston is compiled along with the Ozone/Wayland platform, and is
# fetched as data deps. Thus, run it from the current directory.
#
# Weston is used with the following flags:
# 1) --backend=headless-backend.so - runs Weston in a headless mode
# that does not require a real GPU card.
# 2) --idle-time=0 - disables idle timeout, which prevents Weston
# to enter idle state. Otherwise, Weston stops to send frame callbacks,
# and tests start to time out (this typically happens after 300 seconds -
# the default time after which Weston enters the idle state).
# 3) --width && --height set size of a virtual display: we need to set
# an adequate size so that tests can have more room for managing size
# of windows.
# 4) --use-gl - Runs Weston using hardware acceleration instead of
# SwiftShader.
weston_cmd = ['./weston', '--backend=headless-backend.so', '--idle-time=0',
'--width=1024', '--height=768', '--modules=test-plugin.so']
if '--weston-use-gl' in cmd:
weston_cmd.append('--use-gl')
cmd.remove('--weston-use-gl')
if '--weston-debug-logging' in cmd:
cmd.remove('--weston-debug-logging')
env = copy.deepcopy(env)
env['WAYLAND_DEBUG'] = '1'
weston_proc_display = None
for _ in range(10):
weston_proc = subprocess.Popen(
weston_cmd,
stderr=subprocess.STDOUT, env=env)
# Get the $WAYLAND_DISPLAY set by Weston and pass it to the test launcher.
# Please note that this env variable is local for the process. That's the
# reason we have to read it from Weston separately.
weston_proc_display = _get_display_from_weston(weston_proc.pid)
if weston_proc_display is not None:
break # Weston could launch and we found the display.
# If we couldn't find the display after 10 tries, raise an exception.
if weston_proc_display is None:
raise _WestonProcessError('Failed to start Weston.')
env['WAYLAND_DISPLAY'] = weston_proc_display
return test_env.run_executable(cmd, env, stdoutfile)
except OSError as e:
print('Failed to start Weston: %s\n' % str(e), file=sys.stderr)
return 1
except _WestonProcessError as e:
print('Weston fail: %s\n' % str(e), file=sys.stderr)
return 1
finally:
kill(weston_proc, 'weston')
# dbus-daemon is not a subprocess, so we can't SIGTERM+waitpid() on it.
# To ensure it exits, use SIGKILL which should be safe since all other
# processes that it would have been servicing have exited.
if dbus_pid:
os.kill(dbus_pid, signal.SIGKILL)
def _get_display_from_weston(weston_proc_pid):
"""Retrieves $WAYLAND_DISPLAY set by Weston.
Searches for the child "weston-desktop-shell" process, takes its
environmental variables, and returns $WAYLAND_DISPLAY variable set
by that process. If the variable is not set, tries up to 10 times
and then gives up.
Args:
weston_proc_pid: The process of id of the main Weston process.
Returns:
the display set by Wayland, which clients can use to connect to.
TODO(https://crbug.com/1060469): This is potentially error prone
function. See the bug for further details.
"""
  # Try 100 times, as it is not known when Weston spawns the child desktop
  # shell process. The most seen so far is ~50 checks (~2.5 seconds), but
  # startup is usually almost instantaneous.
for _ in range(100):
# gives weston time to start or fail.
time.sleep(.05)
# Take the parent process.
parent = psutil.Process(weston_proc_pid)
if parent is None:
break # The process is not found. Give up.
# Traverse through all the children processes and find the
# "weston-desktop-shell" process that sets local to process env variables
# including the $WAYLAND_DISPLAY.
children = parent.children(recursive=True)
for process in children:
if process.name() == "weston-desktop-shell":
weston_proc_display = process.environ().get('WAYLAND_DISPLAY')
# If display is set, Weston could start successfully and we can use
# that display for Wayland connection in Chromium.
if weston_proc_display is not None:
return weston_proc_display
return None
class MutableBoolean(object):
"""Simple mutable boolean class. Used to be mutated inside an handler."""
def __init__(self):
self._val = False
def setvalue(self, val):
assert isinstance(val, bool)
self._val = val
def getvalue(self):
return self._val
def raise_xvfb_error(*_):
raise _XvfbProcessError('Terminated')
def raise_weston_error(*_):
raise _WestonProcessError('Terminated')
def find_display():
"""Iterates through X-lock files to find an available display number.
The lower bound follows xvfb-run standard at 99, and the upper bound
is set to 119.
Returns:
A string of a random available display number for Xvfb ':{99-119}'.
Raises:
_XvfbProcessError: Raised when displays 99 through 119 are unavailable.
"""
available_displays = [
d for d in range(99, 120)
if not os.path.isfile('/tmp/.X{}-lock'.format(d))
]
if available_displays:
return ':{}'.format(random.choice(available_displays))
raise _XvfbProcessError('Failed to find display number')
def _set_xdg_runtime_dir(env):
"""Sets the $XDG_RUNTIME_DIR variable if it hasn't been set before."""
runtime_dir = env.get('XDG_RUNTIME_DIR')
if not runtime_dir:
runtime_dir = '/tmp/xdg-tmp-dir/'
if not os.path.exists(runtime_dir):
os.makedirs(runtime_dir, 0o700)
env['XDG_RUNTIME_DIR'] = runtime_dir
def main():
usage = 'Usage: xvfb.py [command [--no-xvfb or --use-weston] args...]'
if len(sys.argv) < 2:
print(usage + '\n', file=sys.stderr)
return 2
# If the user still thinks the first argument is the execution directory then
# print a friendly error message and quit.
if os.path.isdir(sys.argv[1]):
print('Invalid command: \"%s\" is a directory\n' % sys.argv[1],
file=sys.stderr)
print(usage + '\n', file=sys.stderr)
return 3
return run_executable(sys.argv[1:], os.environ.copy())
if __name__ == '__main__':
sys.exit(main())
|
MPtest.py
|
import multiprocessing
import time
import matplotlib.pyplot as plt
import random
a=[]
def fuuc(a,q):
for i in range(1,5):
time.sleep(1)
j = random.gauss(4,2)
        a = a + [j]  # collect the gaussian sample
q.put(a)
if __name__ == '__main__':
q1 = multiprocessing.Queue()
p1 = multiprocessing.Process(target = fuuc, args = (a,q1))
q2 = multiprocessing.Queue()
p2 = multiprocessing.Process(target = fuuc, args = (a*2,q2))
p1.start()
p2.start()
print("before join")
# p1.join()
# p2.join()
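    # Note: the queues are drained with get() before (optionally) joining the
    # workers. Joining first can deadlock when a child is still blocked in
    # queue.put() waiting for its buffered data to be consumed.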
a1 = q1.get()
a2 = q2.get()
print(a1, a2)
print('after join')
print(a1, a2)
# plt.hist(a1, 50)
# plt.hist(a2, 50)
# plt.show()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from py4j.java_collections import ListConverter
from py4j.java_gateway import java_import, JavaObject
from pyspark import RDD, SparkConf
from pyspark.serializers import UTF8Deserializer, CloudPickleSerializer
from pyspark.context import SparkContext
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.dstream import DStream
from pyspark.streaming.util import TransformFunction, TransformFunctionSerializer
__all__ = ["StreamingContext"]
def _daemonize_callback_server():
"""
Hack Py4J to daemonize callback server
    The callback server thread has daemon=False, so it will block the driver
    from exiting if it is not shut down. The following code replaces `start()`
    of CallbackServer with a new version that sets daemon=True for this
    thread.
    Also, it updates the port number (0) with the real port.
"""
# TODO: create a patch for Py4J
import socket
import py4j.java_gateway
logger = py4j.java_gateway.logger
from py4j.java_gateway import Py4JNetworkError
from threading import Thread
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
1)
try:
self.server_socket.bind((self.address, self.port))
if not self.port:
# update port with real port
self.port = self.server_socket.getsockname()[1]
except Exception as e:
msg = 'An error occurred while trying to start the callback server: %s' % e
logger.exception(msg)
raise Py4JNetworkError(msg)
        # Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
py4j.java_gateway.CallbackServer.start = start
class StreamingContext(object):
"""
Main entry point for Spark Streaming functionality. A StreamingContext
    represents the connection to a Spark cluster, and can be used to create
    L{DStream}s from various input sources. It can be created from an existing
    L{SparkContext}.
After creating and transforming DStreams, the streaming computation can
be started and stopped using `context.start()` and `context.stop()`,
respectively. `context.awaitTermination()` allows the current thread
to wait for the termination of the context by `stop()` or by an exception.
"""
_transformerSerializer = None
def __init__(self, sparkContext, batchDuration=None, jssc=None):
"""
Create a new StreamingContext.
@param sparkContext: L{SparkContext} object.
@param batchDuration: the time interval (in seconds) at which streaming
data will be divided into batches
"""
self._sc = sparkContext
self._jvm = self._sc._jvm
self._jssc = jssc or self._initialize_context(self._sc, batchDuration)
def _initialize_context(self, sc, duration):
self._ensure_initialized()
return self._jvm.JavaStreamingContext(sc._jsc, self._jduration(duration))
def _jduration(self, seconds):
"""
Create Duration object given number of seconds
"""
return self._jvm.Duration(int(seconds * 1000))
@classmethod
def _ensure_initialized(cls):
SparkContext._ensure_initialized()
gw = SparkContext._gateway
java_import(gw.jvm, "org.apache.spark.streaming.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.java.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.python.*")
# start callback server
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__:
_daemonize_callback_server()
# use random port
gw._start_callback_server(0)
# gateway with real port
gw._python_proxy_port = gw._callback_server.port
# get the GatewayServer object in JVM by ID
jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
# update the port of CallbackClient with real port
gw.jvm.PythonDStream.updatePythonGatewayPort(jgws, gw._python_proxy_port)
# register serializer for TransformFunction
# it happens before creating SparkContext when loading from checkpointing
cls._transformerSerializer = TransformFunctionSerializer(
SparkContext._active_spark_context, CloudPickleSerializer(), gw)
@classmethod
def getOrCreate(cls, checkpointPath, setupFunc):
"""
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
recreated from the checkpoint data. If the data does not exist, then the provided setupFunc
will be used to create a JavaStreamingContext.
@param checkpointPath: Checkpoint directory used in an earlier JavaStreamingContext program
@param setupFunc: Function to create a new JavaStreamingContext and setup DStreams
"""
# TODO: support checkpoint in HDFS
if not os.path.exists(checkpointPath) or not os.listdir(checkpointPath):
ssc = setupFunc()
ssc.checkpoint(checkpointPath)
return ssc
cls._ensure_initialized()
gw = SparkContext._gateway
try:
jssc = gw.jvm.JavaStreamingContext(checkpointPath)
except Exception:
print >>sys.stderr, "failed to load StreamingContext from checkpoint"
raise
jsc = jssc.sparkContext()
conf = SparkConf(_jconf=jsc.getConf())
sc = SparkContext(conf=conf, gateway=gw, jsc=jsc)
# update ctx in serializer
SparkContext._active_spark_context = sc
cls._transformerSerializer.ctx = sc
return StreamingContext(sc, None, jssc)
@property
def sparkContext(self):
"""
Return SparkContext which is associated with this StreamingContext.
"""
return self._sc
def start(self):
"""
Start the execution of the streams.
"""
self._jssc.start()
def awaitTermination(self, timeout=None):
"""
Wait for the execution to stop.
@param timeout: time to wait in seconds
"""
if timeout is None:
self._jssc.awaitTermination()
else:
self._jssc.awaitTermination(int(timeout * 1000))
def stop(self, stopSparkContext=True, stopGraceFully=False):
"""
Stop the execution of the streams, with option of ensuring all
received data has been processed.
@param stopSparkContext: Stop the associated SparkContext or not
        @param stopGraceFully: Stop gracefully by waiting for the processing
of all received data to be completed
"""
self._jssc.stop(stopSparkContext, stopGraceFully)
if stopSparkContext:
self._sc.stop()
def remember(self, duration):
"""
Set each DStreams in this context to remember RDDs it generated
in the last given duration. DStreams remember RDDs only for a
limited duration of time and releases them for garbage collection.
        This method allows the developer to specify how long to remember
the RDDs (if the developer wishes to query old data outside the
DStream computation).
@param duration: Minimum duration (in seconds) that each DStream
should remember its RDDs
"""
self._jssc.remember(self._jduration(duration))
def checkpoint(self, directory):
"""
Sets the context to periodically checkpoint the DStream operations for master
fault-tolerance. The graph will be checkpointed every batch interval.
@param directory: HDFS-compatible directory where the checkpoint data
will be reliably stored
"""
self._jssc.checkpoint(directory)
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_SER_2):
"""
        Create an input from TCP source hostname:port. Data is received using
        a TCP socket and the received bytes are interpreted as UTF8-encoded,
        ``\\n`` delimited lines.
@param hostname: Hostname to connect to for receiving data
@param port: Port to connect to for receiving data
@param storageLevel: Storage level to use for storing the received objects
"""
jlevel = self._sc._getJavaStorageLevel(storageLevel)
return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self,
UTF8Deserializer())
def textFileStream(self, directory):
"""
Create an input stream that monitors a Hadoop-compatible file system
        for new files and reads them as text files. Files must be written to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
"""
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
def _check_serializers(self, rdds):
# make sure they have same serializer
if len(set(rdd._jrdd_deserializer for rdd in rdds)) > 1:
for i in range(len(rdds)):
# reset them to sc.serializer
rdds[i] = rdds[i]._reserialize()
def queueStream(self, rdds, oneAtATime=True, default=None):
"""
        Create an input stream from a queue of RDDs or a list. In each batch,
it will process either one or all of the RDDs returned by the queue.
NOTE: changes to the queue after the stream is created will not be recognized.
@param rdds: Queue of RDDs
@param oneAtATime: pick one rdd each time or pick all of them once.
@param default: The default rdd if no more in rdds
"""
if default and not isinstance(default, RDD):
default = self._sc.parallelize(default)
if not rdds and default:
rdds = [rdds]
if rdds and not isinstance(rdds[0], RDD):
rdds = [self._sc.parallelize(input) for input in rdds]
self._check_serializers(rdds)
jrdds = ListConverter().convert([r._jrdd for r in rdds],
SparkContext._gateway._gateway_client)
queue = self._jvm.PythonDStream.toRDDQueue(jrdds)
if default:
default = default._reserialize(rdds[0]._jrdd_deserializer)
jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
else:
jdstream = self._jssc.queueStream(queue, oneAtATime)
return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
def transform(self, dstreams, transformFunc):
"""
Create a new DStream in which each RDD is generated by applying
a function on RDDs of the DStreams. The order of the JavaRDDs in
the transform function parameter will be the same as the order
of corresponding DStreams in the list.
"""
jdstreams = ListConverter().convert([d._jdstream for d in dstreams],
SparkContext._gateway._gateway_client)
# change the final serializer to sc.serializer
func = TransformFunction(self._sc,
lambda t, *rdds: transformFunc(rdds).map(lambda x: x),
*[d._jrdd_deserializer for d in dstreams])
jfunc = self._jvm.TransformFunction(func)
jdstream = self._jssc.transform(jdstreams, jfunc)
return DStream(jdstream, self, self._sc.serializer)
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
first = dstreams[0]
jrest = ListConverter().convert([d._jdstream for d in dstreams[1:]],
SparkContext._gateway._gateway_client)
return DStream(self._jssc.union(first._jdstream, jrest), self, first._jrdd_deserializer)
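# Minimal usage sketch (illustrative; assumes an existing SparkContext `sc`
# and something writing lines to localhost:9999):
#     ssc = StreamingContext(sc, batchDuration=1)
#     lines = ssc.socketTextStream("localhost", 9999)
#     lines.pprint()
#     ssc.start()
#     ssc.awaitTermination()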
|
main.py
|
# 7th draft of reset countdown timer.
# By: KraftyKraken - octonink@gmail.com
# Created: 4/7/2021
import _tkinter
import dateutil.relativedelta as rel
import datetime
import winsound
import time
import threading
from tkinter import *
from tkinter import messagebox
# creating window and name it.
root = Tk()
root.geometry("200x140")
root.title("Reset Countdown")
try:
root.iconbitmap(default='icon.ico')
except _tkinter.TclError:
messagebox.showerror("Icon Error", "Unable to find icon resource. Put me back with my friends!")
exit(1)
# Vars that need to be shared around to work.
hour = StringVar()
hour.set("0")
minute = StringVar()
minute.set("0")
second = StringVar()
second.set("0")
thirty_min = BooleanVar()
thirty_min.set(False)
five_min = BooleanVar()
five_min.set(False)
running = BooleanVar()
running.set(True)
temp = 0
# create the boxes.
hourEntry = Entry(root, width=4, font=("Arial", 14, ""), textvariable=hour)
hourEntry.place(x=20, y=20)
minuteEntry = Entry(root, width=4, font=("Arial", 14, ""), textvariable=minute)
minuteEntry.place(x=77, y=20)
secondEntry = Entry(root, width=4, font=("Arial", 14, ""), textvariable=second)
secondEntry.place(x=133, y=20)
def na_time():
running.set(True)
today_utc = datetime.datetime.utcnow()
# if today is not sat (sat 2am UTC when reset is)
# find next saturday and say how long it is
if today_utc.weekday() != 5:
reset_utc = rel.relativedelta(hour=2, minute=0, second=0, microsecond=0, days=1, weekday=rel.SA)
reset = today_utc + reset_utc
countdown(int((reset-today_utc).total_seconds()))
# today is saturday
else:
# we're after 2am in the day just go about it normally
if datetime.datetime.utcnow().time() >= datetime.time(2, 0, 0, 0):
reset_utc = rel.relativedelta(hour=2, minute=0, second=0, microsecond=0, days=1, weekday=rel.SA)
reset = today_utc + reset_utc
countdown(int((reset-today_utc).total_seconds()))
# otherwise don't go to any other day, just give the 2hr countdown
else:
reset = today_utc + rel.relativedelta(hour=2, minute=0, second=0, microsecond=0)
countdown(int((reset-today_utc).total_seconds()))
def eu_time():
running.set(True)
today_utc = datetime.datetime.utcnow()
# if today is not fri (fri 6pm UTC when reset is)
# find next fri and say how long it is
if today_utc.weekday() != 4:
reset_utc = rel.relativedelta(hour=18, minute=0, second=0, microsecond=0, days=1, weekday=rel.FR)
reset = today_utc + reset_utc
countdown(int((reset-today_utc).total_seconds()))
# today is fri
else:
# we're after 6pm in the day just go about it normally
if datetime.datetime.utcnow().time() >= datetime.time(18, 0, 0, 0):
reset_utc = rel.relativedelta(hour=18, minute=0, second=0, microsecond=0, days=1, weekday=rel.FR)
reset = today_utc + reset_utc
countdown(int((reset-today_utc).total_seconds()))
# otherwise don't go to any other day, just give the 18hr countdown
else:
reset = today_utc + rel.relativedelta(hour=18, minute=0, second=0, microsecond=0)
countdown(int((reset-today_utc).total_seconds()))
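# Illustrative sketch (assumption): how dateutil.relativedelta jumps to the next
# Friday 18:00 UTC from an arbitrary datetime, which is the trick na_time() and
# eu_time() rely on above.
#
#   now = datetime.datetime(2021, 4, 7, 12, 0)   # a Wednesday
#   nxt = now + rel.relativedelta(hour=18, minute=0, second=0, microsecond=0,
#                                 days=1, weekday=rel.FR)
#   # nxt == datetime.datetime(2021, 4, 9, 18, 0), i.e. Friday 18:00 UTC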
# finds military time only!
def my_raid():
running.set(True)
try:
int(hour.get())
int(minute.get())
int(second.get())
except ValueError:
messagebox.showerror("Bad Input", "Please input numbers into the boxes")
return
now = datetime.datetime.today()
try:
target_time = rel.relativedelta(hour=int(hour.get()), minute=int(minute.get()),
second=int(second.get()), microsecond=0)
my_time = now + target_time
        if int((my_time-now).total_seconds()) <= 0:
            messagebox.showerror("Bad Input", "Please use military time\nFor PM, simply add 12 hours.")
            return
        countdown(int((my_time-now).total_seconds()))
except ValueError:
messagebox.showerror("Bad Input", "Please use:\nBetween 0 and 23 on hours\nBetween 0 and 60 on "
"minutes and seconds.")
def accurate_time():
global temp
while temp > -1:
time.sleep(1)
temp -= 1
def countdown(work_time):
global temp
t1 = threading.Thread(target=lambda: accurate_time())
try:
temp = work_time
t1.start()
except ValueError:
messagebox.showerror("Value Error", "Incorrect values! Date Calculation malfunction")
while temp > -1:
# minutes=temp/60, seconds = temp%60)
minutes, seconds = divmod(temp, 60)
# hours. Pop up hours only if we've got a full hour.
# hours = temp/60, minutes = temp%60)
hours = 0
        if minutes >= 60:
hours, minutes = divmod(minutes, 60)
hour.set("{0:3d}".format(hours))
minute.set("{0:2d}".format(minutes))
second.set("{0:2d}".format(seconds))
# we should auto-join here because acc_time can't run, so it joins default below
if not running.get():
temp = -1
else:
try:
if thirty_min.get() and temp == 1800:
winsound.PlaySound("SystemExclamation", winsound.SND_ASYNC)
elif five_min.get() and temp == 300:
winsound.PlaySound("SystemExclamation", winsound.SND_ASYNC)
root.update()
root.after(1000)
except _tkinter.TclError:
temp = -1
t1.join()
return
    # resetting the displayed values fixes the "stuck at 1" issue and restores the defaults when not running
hour.set("0")
minute.set("0")
second.set("0")
root.update()
t1.join()
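# Illustrative sketch (assumption): converting a raw second count into the H/M/S
# values shown in the entry boxes, mirroring the divmod logic used in countdown().
#
#   total = 3725
#   minutes, seconds = divmod(total, 60)   # 62, 5
#   hours, minutes = divmod(minutes, 60)   # 1, 2  -> 1h 02m 05s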
# nastier than before, but safer to delete the thread
def close_program():
response = messagebox.askyesno("Reset Countdown - Close?", "Do you want to close the program?")
if response:
global temp
temp = -1
root.quit()
btn = Button(root, text=" NA ", bd='5', command=na_time)
btn.place(x=20, y=60)
btn = Button(root, text=" EU ", bd='5', command=eu_time)
btn.place(x=65, y=60)
btn = Button(root, text=" Custom Today ", bd='5', command=my_raid)
btn.place(x=20, y=95)
# menu bar
menu_bar = Menu(root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="Reset", command=lambda: running.set(False))
file_menu.add_separator()
file_menu.add_checkbutton(label="5 Minute Reminder Ping", command=lambda: five_min.set(not five_min.get()))
file_menu.add_checkbutton(label="30 Minute Reminder Ping", command=lambda: thirty_min.set(not thirty_min.get()))
file_menu.add_separator()
file_menu.add_command(label="Close", command=close_program)
menu_bar.add_cascade(label="File", menu=file_menu)
# help menu
help_menu = Menu(menu_bar, tearoff=0)
help_menu.add_command(label="Ping Test", command=lambda: winsound.PlaySound("SystemExclamation", winsound.SND_ASYNC))
menu_bar.add_cascade(label="Help", menu=help_menu)
root.config(menu=menu_bar)
root.protocol("WM_DELETE_WINDOW", close_program)
root.mainloop()
|
signal_processing_module.py
|
import numpy as np
from scipy import signal  # This might need to be imported in some other way.
import matplotlib.pyplot as plt  # TODO: remove later
import time  # TODO: remove later
from scipy.fftpack import fft
from scipy.signal import spectrogram # To plot spectrogram of FFT.
import threading
import queue
import os
class SignalProcessing:
# FFTfreq and FFTamplitude are temporary for testing FFT. Remove later
def __init__(self, list_of_variables_for_threads, bluetooth_server, FFTfreq, FFTamplitude):
self.list_of_variables_for_threads = list_of_variables_for_threads
self.go = list_of_variables_for_threads["go"]
self.HR_filtered_queue = list_of_variables_for_threads["HR_filtered_queue"]
        self.HR_final_queue = list_of_variables_for_threads["HR_final_queue"]  # TODO remove
self.sample_freq = list_of_variables_for_threads["sample_freq"]
self.bluetooth_server = bluetooth_server
# Variables for Schmitt Trigger
self.RR_filtered_queue = list_of_variables_for_threads["RR_filtered_queue"]
self.RR_final_queue = list_of_variables_for_threads["RR_final_queue"]
self.freqArrayTemp_last = [] # If no breathing rate is found use last value
# print(list(self.RR_final_queue.queue))
self.RTB_final_queue = list_of_variables_for_threads["RTB_final_queue"]
self.time_when_sent_last_value = None # to check time passed after sent a value
# Variables for Pulse detection
self.index_fft = 0
        self.T_resolution = 20  # previously 30
self.overlap = 90 # Percentage of old values for the new FFT
self.beta = 1 # Kaiser window form
        self.tau = 12  # TODO Describe all variables
# Data in vector with length of window
self.fft_window = np.zeros(self.T_resolution*self.sample_freq) # Width in samples of FFT
self.window_width = int(len(self.fft_window))
self.total_fft_length = int(1.5*self.window_width)
        # window_width_half = int(window_width/2) # Since FFT only processes half of freq (Nyquist)
self.window_slide = int(np.round(self.window_width*(1-self.overlap/100)))
self.window_slide_global = list_of_variables_for_threads["window_slide"]
self.window_slide_global = self.window_slide
# self.freq = self.sample_freq * \
# np.arange(self.total_fft_length/2)/self.window_width # Evenly spaced freq array
        self.freq = np.linspace(0, self.sample_freq/2, num=int(self.total_fft_length/2))  # num must be an int
self.delta_T = self.window_slide / self.sample_freq
        # int(round(self.tau / self.delta_T)) # Make sure tau is larger than delta_T, else it will be zero and the program will fail.
self.number_of_old_FFT = 15
self.FFT_old_values = np.zeros((self.number_of_old_FFT, int(
self.total_fft_length/2))) # Saving old values for moving mean
        # Start heart_rate
print("Start thread heart_rate")
self.heart_rate_thread = threading.Thread(target=self.heart_rate)
self.heart_rate_thread.start()
        # Start schmitt
self.schmittTrigger_thread = threading.Thread(target=self.schmittTrigger)
self.schmittTrigger_thread.start()
self.last_time = time.time()
self.time = time.time()
# Temporary for test of FFT and saving to csv
self.FFTfreq = FFTfreq
self.FFTamplitude = FFTamplitude
self.peak_freq = []
self.peak_amplitude = []
self.peak_weighted = []
self.len_fft = 0
self.heart_rate_csv = list_of_variables_for_threads["heart_rate_csv"]
self.start_write_to_csv_time = list_of_variables_for_threads["start_write_to_csv_time"]
self.initiate_write_heart_rate = list_of_variables_for_threads["initiate_write_heart_rate"]
self.heart_rate_reliability_csv = []
self.heart_rate_spectrum = []
self.heart_rate_frequency = []
    # The code is messy and needs restructuring; all constants should be defined at the top.
    # Currently follows the Matlab structure.
def heart_rate(self): # MAIN for finding pulse
# print("heart_rate thread started")
index_in_FFT_old_values = 0 # Placement of old FFT in FFT_old_values
FFT_counter = 1 # In start to avg over FFT_counter before FFT_old_values is filled to max
found_heart_freq_old = 180/60 # Guess the first freq
        # Variables for weighted peaks
#multiplication_factor = 20
time_constant = 2
start_time = time.time()
first_real_value = True # the first real heart rate found
old_heart_freq_list = [] # old values
found_peak_reliability = "None"
found_peak_reliability_int = 0
while self.go:
# print("in while loop heart_rate")
fft_signal_out = self.windowedFFT()
            fft_signal_out_dB = 20*np.log10(fft_signal_out)  # As of May 7, length of vector is 600
self.FFT_old_values[index_in_FFT_old_values][:] = fft_signal_out_dB
# saved_old = self.FFT_old_values[:, 2] #to print
# fft movemean
FFT_averaged = self.mean_of_old_values(FFT_counter)
#print("Length of averaged FFT: ", len(FFT_averaged))
            # Returns the peaks in the set interval from the averaged FFT
peak_freq, peak_amplitude = self.findPeaks(FFT_averaged)
if len(peak_freq) > 0 and np.amin(peak_amplitude) > -40 and np.amax(peak_amplitude) > -30 and time.time() - start_time > 50:
                # If there are zero peaks, use the last value; do not trigger on noise
                # (before enough time has passed there is mostly just noise)
# Going into own method when tested and working staying in "main loop"
delta_freq = []
for freq in peak_freq:
delta_freq.append(freq - found_heart_freq_old)
self.peak_weighted = []
close_peaks = []
close_disturbing_peaks = []
try:
for i in range(0, len(peak_freq)): # Weight the peaks found depending on their amplitude,
if peak_freq[i] < 0.9:
multiplication_factor = 5 # to lower the noise peak under 0.9 Hz
elif peak_freq[i] < 1:
multiplication_factor = 7 # to lower the noise peak under 1 Hz
else:
multiplication_factor = 10
                        # distance to the last tracked peak, and on the frequency (the noise is roughly 1/f, so to compensate multiply by f)
self.peak_weighted.append(peak_amplitude[i] + multiplication_factor * np.exp(
-np.abs(peak_freq[i] - found_heart_freq_old) / time_constant) * np.sqrt(
np.sqrt(peak_freq[i])))
if np.abs(peak_freq[i] - found_heart_freq_old) < 0.2 and np.abs(
peak_amplitude[i] - found_heart_freq_amplitude_old) < 4 and (
found_heart_freq_old < 1 or peak_freq[i] > 1):
# To average peaks if they are close
close_peaks.append(peak_freq[i])
elif np.abs(peak_freq[i] - found_heart_freq_old) < 0.5 and np.abs(
peak_amplitude[i] - found_heart_freq_amplitude_old) < 5:
# If there is a lot of peaks to disturb the measurement
close_disturbing_peaks.append(peak_freq[i])
found_peak_index = np.argmax(np.array(self.peak_weighted))
found_heart_freq = peak_freq[found_peak_index]
found_heart_freq_amplitude_old = self.peak_amplitude[found_peak_index]
# Determine the reliability of the found peak, if it's really the heart rate or just noise.
# Compares to the next largest peak amplitude
try:
next_largest_peak_amplitude = np.amax(
self.peak_amplitude[:found_peak_index]+self.peak_amplitude[found_peak_index+1:])
except:
next_largest_peak_amplitude = -35
if found_heart_freq_amplitude_old - next_largest_peak_amplitude > 12:
found_peak_reliability = "ExceptionalHigh"
found_peak_reliability_int = 6
elif found_heart_freq_amplitude_old - next_largest_peak_amplitude > 7:
found_peak_reliability = "VeryHigh"
found_peak_reliability_int = 5
elif found_heart_freq_amplitude_old - next_largest_peak_amplitude > 4:
found_peak_reliability = "High"
found_peak_reliability_int = 4
elif found_heart_freq_amplitude_old - next_largest_peak_amplitude > 3:
found_peak_reliability = "Medium"
found_peak_reliability_int = 3
else:
found_peak_reliability = "Low" # TODO uncertain?
found_peak_reliability_int = 2
if len(close_peaks) > 1:
print('averaging, old:', found_heart_freq)
found_heart_freq = np.mean(close_peaks)
if len(close_disturbing_peaks) > 3 and found_heart_freq_old > 1:
                        # Too many disturbing peaks around, can't identify the correct one
#print('Too many disturbing peaks around, can\'t identify the correct one')
found_heart_freq = found_heart_freq_old
found_peak_reliability = "VeryLow"
found_peak_reliability_int = 1
old_heart_freq_list.append(found_heart_freq) # save last 20 values
if len(old_heart_freq_list) > 5:
old_heart_freq_list.pop(0)
if np.abs(np.mean(old_heart_freq_list[
0:-2]) - found_heart_freq) > 0.1: # too big change, probably noise or other disruptions
found_heart_freq = np.mean(old_heart_freq_list)
#print('Too big change, probably noise or other disruptions, old:', old_heart_freq_list[-1])
except Exception as e:
                print('exception in heart peak selection:', e)
                found_heart_freq = 0
if first_real_value and (found_heart_freq > 1 or time.time() - start_time > 120):
first_real_value = False
if found_heart_freq < 1 and first_real_value: # Do not trigger on the large noise peak under 1 Hz
found_heart_freq = 0
found_heart_freq_old = found_heart_freq
elif len(peak_freq) > 0 and np.amin(peak_amplitude) > -40:
found_heart_freq = found_heart_freq_old # just use the last values
found_peak_reliability = "VeryLow"
found_peak_reliability_int = 1
else:
#found_heart_freq = found_heart_freq_old
found_heart_freq = 0
self.peak_weighted.clear()
found_peak_reliability = "None"
found_peak_reliability_int = 0
if not first_real_value:
print("Found heart rate Hz and BPM: ", found_heart_freq, int(
60*found_heart_freq), 'Reliability:', found_peak_reliability)
found_heart_rate = int(60 * found_heart_freq)
self.bluetooth_server.write_data_to_app(
str(found_heart_rate) + ' ' + found_peak_reliability, 'heart rate') # Send to app
else:
print("Waiting to find heart rate")
found_heart_rate = 0
found_peak_reliability = "None"
found_peak_reliability_int = 0
self.bluetooth_server.write_data_to_app(
str(found_heart_rate) + ' ' + found_peak_reliability, 'heart rate') # Send to app
# BPM_search = self.freq * 60 # Used where?
# print("past plot heart rate")
# increment counters in loop
if FFT_counter < self.number_of_old_FFT:
FFT_counter += 1
index_in_FFT_old_values += 1
if index_in_FFT_old_values == self.number_of_old_FFT:
index_in_FFT_old_values = 0
# initiate save to CSV'
# print("time for csv write List: ",
# self.list_of_variables_for_threads["start_write_to_csv_time"])
if self.initiate_write_heart_rate and time.time() - self.list_of_variables_for_threads["start_write_to_csv_time"] < 5*60:
print("Inside save to csv statement")
# self.heart_rate_spectrum.append(self.FFTamplitude)
# self.heart_rate_frequency.append(self.FFTfreq)
self.heart_rate_csv.append(found_heart_rate)
self.heart_rate_reliability_csv.append(found_peak_reliability_int)
elif self.initiate_write_heart_rate:
np_csv = np.asarray(self.heart_rate_csv)
np.savetxt("heart_rate.csv", np_csv, delimiter=";")
np_csv = np.asarray(self.heart_rate_reliability_csv)
np.savetxt("heart_rate_reliability.csv", np_csv, delimiter=";")
print("Should have saved CSV")
#self.go.pop(0)
#self.list_of_variables_for_threads["go"] = self.go
# np_csv = np.asarray(self.heart_rate_csv)
# np.savetxt("heart_rate.csv", np_csv, delimiter=";")
# np_csv = np.asarray(self.heart_rate_reliability_csv)
# np.savetxt("heart_rate_reliability.csv", np_csv, delimiter=";")
# print("Should have saved CSV")
# Remove Bluetooth clients
# for client in self.bluetooth_server.client_list:
# print('try to remove client ' +
# str(self.bluetooth_server.address_list[self.bluetooth_server.client_list.index(client)]))
# client.close()
# print('remove client ' +
# str(self.bluetooth_server.address_list[self.bluetooth_server.client_list.index(client)]))
# self.bluetooth_server.server.close()
# print("server is now closed")
# os.system("echo 'power off\nquit' | bluetoothctl")
print("Out of pulse")
def mean_of_old_values(self, FFT_counter): # Check
FFT_average_over = np.zeros(int(self.total_fft_length/2))
for columns in range(0, int(self.total_fft_length/2)):
for rows in range(0, self.number_of_old_FFT):
FFT_average_over[columns] = self.FFT_old_values[rows][columns] + \
FFT_average_over[columns]
#print("Mean of old values: ", self.FFT_average_out / FFT_counter)
return FFT_average_over / FFT_counter
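    # Illustrative sketch (assumption): the nested loops above amount to a column-wise
    # sum over the stored spectra, i.e. roughly:
    #
    #   FFT_average_over = np.sum(self.FFT_old_values, axis=0)
    #   averaged = FFT_average_over / FFT_counter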
### windowedFFT ###
# input:
# fft_window: array to be filled with filtered data. And then to be fft:d
# overlap: how many overlapping values between two consecutive fft windows. [in percentage]
# beta: shape factor for kaiser window.
# returns:
# freq: corresponding frequency array
# fft_signal_out: fft:d array
def windowedFFT(self):
# window_width = len(fft_window) # size of each window
# window_slide = int(np.round(window_width*(1-overlap/100))) # number of overlapping points
# print("Window slide: ", window_slide)
for i in range(self.window_slide): # fills the fft_window array with window_slide values from filtered queue
self.fft_window[self.index_fft] = self.HR_filtered_queue.get()
self.index_fft += 1
if self.index_fft == self.window_width:
self.index_fft = 0
# TODO: Check if necessary. # roll the matrix so that the last inserted value is to the right.
self.fft_window = np.roll(self.fft_window, -(self.index_fft+1))
fft_signal_out = self.smartFFT() # do fft
        # TODO: check if necessary. # roll the matrix back
self.fft_window = np.roll(self.fft_window, (self.index_fft+1))
return fft_signal_out
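    # Illustrative sketch (assumption): the window/overlap arithmetic used above, with
    # the 20 s resolution and the 20 Hz sample rate suggested by the commented test below.
    #
    #   window_width = 20 * 20                                     # 400 samples per window
    #   window_slide = int(round(window_width * (1 - 90 / 100)))   # 40 new samples per FFT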
### smartFFT ###
# input:
# signal_in: in signal as an array
# beta: shape factor for the window
# returns:
# freq: frequency array [Hz]
# signal_out: fft of the in signal as an array
def smartFFT(self): # "signal_in" is "fft_window"
# print("In smartFFT")
# length_seq = len(signal_in) # number of sequences
window = np.kaiser(self.window_width, self.beta) # beta: shape factor
self.fft_window = np.multiply(self.fft_window, window)
# two-sided fft of input signal
signal_in_fft = fft(self.fft_window, n=self.total_fft_length) # ,n=2*self.window_width)
#print("len of fft: ", len(signal_in_fft))
signal_fft_abs = np.abs(np.divide(signal_in_fft, self.window_width))
#print("fft abs: ", signal_fft_abs)
signal_out = np.multiply(2, signal_fft_abs[0:self.total_fft_length//2]) # one-sided fft
#print("Signal out: ", signal_out)
#print("len of signal out: ", len(signal_out))
# frequency array corresponding to frequencies in the fft
return signal_out
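    # Illustrative sketch (assumption): a standalone version of the windowed, one-sided,
    # zero-padded FFT computed above, for a 1 Hz sine sampled at 20 Hz.
    #
    #   fs, n = 20, 400
    #   t = np.arange(n) / fs
    #   x = np.sin(2 * np.pi * 1.0 * t) * np.kaiser(n, 1)
    #   spec = np.abs(fft(x, n=int(1.5 * n))) / n
    #   one_sided = 2 * spec[:int(1.5 * n) // 2]
    #   freqs = np.linspace(0, fs / 2, num=int(1.5 * n) // 2)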
def findPeaks(self, FFT_averaged):
# Lower and higher freq for removing unwanted areas of the FFT
# TODO Unsure about this part, same max freq several times in a row
F_scan_lower = 0.8
F_scan_upper = 3
#print("len self freq: ", len(self.freq))
FFT_in_interval = FFT_averaged[self.freq <= F_scan_upper]
freq2 = self.freq[self.freq <= F_scan_upper]
FFT_in_interval = FFT_in_interval[freq2 > F_scan_lower]
peak_freq_linspace = np.linspace(F_scan_lower, F_scan_upper, num=len(FFT_in_interval))
#print("len of fft in interval: ", len(FFT_in_interval))
#print("FFT_in_interval", FFT_in_interval, "\n", len(FFT_in_interval))
MaxFFT = np.amax(FFT_in_interval) # Do on one line later, to remove outliers
#threshold = MaxFFT - 10
threshold = -35
peaks, _ = signal.find_peaks(FFT_in_interval)
index_list = []
index = 0
for peak in peaks:
if FFT_in_interval[peak] < threshold:
index_list.append(index)
index += 1
peaks = np.delete(peaks, index_list)
#print("Peaks: ",)
self.peak_freq = [] # Maybe change to array?
for i in peaks:
self.peak_freq.append(peak_freq_linspace[i])
#print("Found peak freq: ", self.peak_freq)
self.peak_amplitude = []
for i in peaks:
self.peak_amplitude.append(FFT_in_interval[i])
# Plotting for FFT
self.FFTfreq = peak_freq_linspace
self.FFTamplitude = FFT_in_interval
self.len_fft = int(len(FFT_in_interval))
#print("Length of fft:", self.len_fft)
return self.peak_freq, self.peak_amplitude
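    # Illustrative sketch (assumption): thresholded peak picking equivalent to the manual
    # filtering above, using scipy.signal.find_peaks' `height` argument.
    #
    #   spectrum_dB = np.array([-50, -30, -45, -20, -48])
    #   peaks, _ = signal.find_peaks(spectrum_dB, height=-35)
    #   # peaks -> array([1, 3]); both candidates clear the -35 dB threshold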
# TODO Used for plotting in main, remove later
def getFFTvalues(self):
return self.FFTfreq, self.FFTamplitude, self.peak_freq, self.peak_amplitude, self.len_fft, self.peak_weighted
def schmittTrigger(self):
print("SchmittTrigger started")
# Test for time
Inside = True
# variable declaration
        Tc = 12  # averaging over this many seconds [s]
        schNy = 0  # Schmitt new
        schGa = 0  # Schmitt old
        Hcut = 0.001  # Higher hysteresis cut. Change this according to the filter. To manage filter startup
        Lcut = -Hcut  # Lower hysteresis cut
        # average over old values. TODO possibly no averaging, to prevent the breathing rate
        # from converging to a fixed value. We want to see more in real time.
avOver = 8
freqArray = np.zeros(avOver) # for averaging over old values
count = 1 # for counting number of samples passed since last negative flank
countHys = 1 # for counting if hysteresis should be updated
FHighRR = 0.7 # To remove outliers in mean value
FLowRR = 0.1 # To remove outliers in mean value
# for saving respiratory_queue_RR old values for hysteresis
trackedRRvector = np.zeros(self.sample_freq * Tc) # to save old values
while self.go:
# to be able to use the same value in the whole loop
if self.time_when_sent_last_value is not None and (time.time() - self.time_when_sent_last_value > 10):
# sends zero as breath rate if no value was found the last ten seconds
self.bluetooth_server.write_data_to_app(0, 'breath rate')
self.time_when_sent_last_value = time.time()
trackedRRvector[countHys - 1] = self.RR_filtered_queue.get()
#print("Amplitude for respitory rate {}".format(trackedRRvector[countHys-1]))
# self.RTB_final_queue.put(trackedRRvector[countHys - 1])
if countHys == self.sample_freq * Tc:
Hcut = np.sqrt(np.mean(np.square(trackedRRvector))) * 0.7 # rms of trackedRRvector
# Hcut = 0.002
if Hcut < 0.1:
Hcut = 0.1
Lcut = -Hcut
# print("Hcut: ", Hcut) # se vad hysteres blir
# print("The last value of vector {}".format(trackedRRvector[countHys-1]))
# TODO Hinder så att insvängningstiden för filtret hanteras
countHys = 0
# schNy = schGa behövs inte. Görs nedan
# trackedRRvector[countHys-1] is the current data from filter
# Takes long time to go into this loop
if trackedRRvector[countHys - 1] <= Lcut:
schNy = 0
if schGa == 1:
# print("Inside update resprate loop")
np.roll(freqArray, 1)
# save the new frequency between two negative flanks
freqArray[0] = self.sample_freq / count
# Take the mean value
# RR_final_queue is supposed to be the breathing rate queue that is sent to app
# self.RR_final_queue.put(self.getMeanOfFreqArray(freqArray, FHighRR, FLowRR))
# start = time.time()
self.bluetooth_server.write_data_to_app(
self.getMeanOfFreqArray(freqArray, FHighRR, FLowRR), 'breath rate')
self.time_when_sent_last_value = time.time()
                    # done = time.time()  # seems to take a little time, probably due to getMeanOfFreqArray
# print('send to app', (done - start)*1000)
# TODO put getMeanOfFreqArray() into queue that connects to send bluetooth values instead
count = 0
# trackedRRvector[countHys-1] is the current data from filter
elif trackedRRvector[countHys - 1] >= Hcut:
schNy = 1
schGa = schNy
count += 1
countHys += 1
end = time.time()
# print("Tid genom schmittTrigger: ", end-start)
print("out of schmittTrigger")
# Used in schmittTrigger. Removes outliers and return mean value over last avOver values.
def getMeanOfFreqArray(self, freqArray, FHighRR, FLowRR): # remove all values > FHighRR and < FLowRR
self.time = time.time()
# print("Since last time {}".format(self.time - self.last_time))
self.last_time = self.time
start = time.time()
# freqArrayTemp = [x for x in freqArray if (x < FHighRR and x > FLowRR)]
index_list = []
index = 0
# print("Before removal: Array {} \n Low and high hyst {},{}".format(freqArray, FLowRR, FHighRR))
for freq_value in freqArray:
if freq_value < FLowRR or freq_value > FHighRR or freq_value == 0:
index_list.append(index)
index += 1
freqArrayTemp = np.delete(freqArray, index_list)
# print("After removal but before deviation: ", freqArrayTemp)
# freqArrayTemp = [x for x in freqArrayTemp if x != 0]
# print(non_zero_temp)
# print(type(non_zero_temp))
# freqArrayTemp = freqArrayTemp[non_zero_temp]
# a[nonzero(a)]
median = np.median(freqArrayTemp) # median value
stanDev = np.std(freqArrayTemp) # standard deviation
# freqArrayTemp = [x for x in freqArrayTemp if (
# x > median - 3 * stanDev and x < median + 3 * stanDev)]
# print(freqArrayTemp)
index_list = []
index = 0
for freq_value in freqArrayTemp:
            if freq_value < median - 3 * stanDev or freq_value > median + 3 * stanDev:
index_list.append(index)
index += 1
freqArrayTemp = np.delete(freqArrayTemp, index_list)
# print("Last array before mean value {}".format(freqArrayTemp))
# if len(freqArrayTemp) == 0:
# freqArrayTemp = self.freqArrayTemp_last
# else:
# self.freqArrayTemp_last = freqArrayTemp
mean = np.mean(freqArrayTemp) # mean value of last avOver values excluding outliers
# mean is nan if FreqArrayTemp is zero, which creates error when sending data to app
if len(freqArrayTemp) == 0:
            mean = 0  # TODO use the previous value instead
            print("No values left in freqArrayTemp")
mean = mean * 60 # To get resp rate in Hz to BPM
mean = int(np.round(mean))
# print("data from schmitt {}".format(mean))
end = time.time()
# print("Time through getMeanFreq {}".format(end-start))
return mean
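    # Illustrative sketch (assumption): the outlier rejection above expressed with boolean
    # masks -- keep values inside the band and within median +/- 3 standard deviations.
    #
    #   arr = np.array([0.25, 0.30, 0.28, 0.0, 5.0])
    #   arr = arr[(arr > 0.1) & (arr < 0.7)]                  # FLowRR / FHighRR band
    #   m, s = np.median(arr), np.std(arr)
    #   arr = arr[(arr > m - 3 * s) & (arr < m + 3 * s)]
    #   bpm = int(round(np.mean(arr) * 60)) if len(arr) else 0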
# MAIN ## TODO: Remove MAIN later
# #windowedFFT(data_in, sample_freq, T_resolution, overlap, beta)
# HR_filtered_queue = queue.Queue()
# HR_final_queue = queue.Queue()
# RR_filtered_queue = queue.Queue()
# RR_final_queue = queue.Queue()
# sample_freq = 20
# length_seq = 100000
# sample_spacing = 1/sample_freq
# t = np.arange(length_seq)*sample_spacing
# signal_in = 4*np.sin(1 * 2.0*np.pi*t) + 0*np.sin(2 * 2.0*np.pi*t)
# # print(signal_in)
# for i in range(len(signal_in)):
# HR_filtered_queue.put(signal_in[i])
# RR_filtered_queue.put(signal_in[i])
# go = ["True"]
# signal_processing = SignalProcessing(
# go, HR_filtered_queue, HR_final_queue, RR_filtered_queue, RR_final_queue)
# time.sleep(0.5)
# go.pop(0)
#### Test of smartFFT ####
# sample_freq = 20
# length_seq = 600
# sample_spacing = 1/sample_freq
# t = np.arange(length_seq)*sample_spacing
# signal_in = 4*np.sin(1 * 2.0*np.pi*t) + 0.5*np.sin(4 * 2.0*np.pi*t)
# #signal_in = np.roll(signal_in, 5)
# beta = 1
# [freq,signal_out] = smartFFT(signal_in,sample_freq,beta)
# plt.plot(freq, signal_out)
# plt.grid()
# plt.show()
|
PagesController.py
|
import csv
from operator import contains
import random
import string
import threading
import uuid
from datetime import datetime as date
from flask import redirect, render_template, url_for
from models import ContractsModel, ImportModel
class Pages:
def __init__(self):
self.imports = ImportModel.Imports()
self.contract = ContractsModel.Contracts()
self.file_import = f'import_{uuid.uuid4()}.csv'
self.file_link = f'links_{uuid.uuid4()}.csv'
self.key = []
self.link_random = None
self.link_user = {
"key_access": [],
"link_random": [],
"error_status": []
}
self.variables = {}
        self.count_error = 0  # error counter
self.line_errors = {}
self.errors = {
"count": 0,
"line_errors": {}
}
def index(self, req):
if req.method == 'GET':
return render_template('index.html')
def new_imports(self, req):
if req.method == 'GET':
data = self.imports.find({})
return render_template('import.html', data=data)
elif req.method == 'POST':
try:
file_name = req.files['fileName']
                file_name.save('static/uploads/' + self.file_import)  # Save the uploaded CSV file
                t = threading.Thread(target=self.process, args=(file_name, ))  # Run the processing in a thread
t.start()
except Exception as e:
print(e)
return redirect(url_for('pages.imports', filename=file_name))
def errors_report(self, req):
errors = self.imports.find({})
return render_template('errors.html', errors=errors)
def process(self, file_name):
with open('static/uploads/' + file_name.filename, 'r', encoding='utf-8') as read_file_CSV:
# next(read_file_CSV)
table = csv.reader(read_file_CSV, delimiter=';')
            count_line = 1  # which line we are currently on
count_contract_success = 0
index_count = 0
for row in table:
self.new_variables(row, count_line)
count_error = 0
self.generate_link_random()
try:
name = row[0]
key_access = row[1]
contract = row[2]
input_value = row[3]
date_entries = row[4]
installment_amount = row[5]
installment = row[6]
value_installment = row[7]
expire = row[8]
if not name:
count_error += 1
self.line_errors[f'{count_line}-1'] = 'coluna nome vazia'
if not key_access:
count_error += 1
self.line_errors[f'{count_line}-2'] = 'coluna chave de acesso vazia'
if not contract:
count_error += 1
self.line_errors[f'{count_line}-3'] = 'coluna contrato vazia'
if not input_value:
count_error += 1
self.line_errors[f'{count_line}-4'] = 'coluna valor de entrada vazia'
else:
                        input_value = self.monetary_format(input_value)
if not date_entries:
count_error += 1
self.line_errors[f'{count_line}-5'] = 'coluna data de entrada vazia'
else:
date_entries = date_entries.split('/')
for data in range(len(date_entries)):
print(data, 'data')
print(len(date_entries), 'len date')
date_entries[data] = int(date_entries[data])
date_entries = date(date_entries[2], date_entries[1], date_entries[0]).strftime('%d/%m/%Y')
if not installment_amount:
count_error += 1
self.line_errors[f'{count_line}-6'] = 'coluna quantidade de parcelas vazia'
else:
installment_amount = int(installment_amount)
if not installment:
count_error += 1
self.line_errors[f'{count_line}-7'] = 'coluna vencimentos das parcelas vazia'
else:
installment = int(installment)
if not value_installment:
count_error += 1
self.line_errors[f'{count_line}-8'] = 'coluna valor das parcelas vazia'
else:
value_installment = int(value_installment)
if not expire:
count_error += 1
self.line_errors[f'{count_line}-9'] = 'coluna expiração das parcelas vazia'
else:
expire = expire.split('/')
for data in range(len(expire)):
expire[data] = int(expire[data])
expire = date(expire[2], expire[1], expire[0]).strftime('%d/%m/%Y')
self.errors['count'] = self.errors['count'] + count_error
self.errors['line_errors'] = self.line_errors
if count_error > 0:
self.link_user["error_status"].append('Sim')
else:
wallet = 5
company_id = 1
user_access_control = self.contract.find({'company_id':company_id, 'access_key': key_access})
if len(user_access_control) > 0:
self.contract.update(
{
'access_key': key_access,
'contract': contract,
'wallet': wallet,
'company_id': company_id
},
{"status": False, "status_type": "Atualizado"}
)
link_user_old = user_access_control[-1]['link']
self.create_contracts_bd(
name, key_access, contract, input_value,
date_entries, value_installment,installment_amount,
installment, expire, wallet, company_id
)
self.contract.update(
{'link':self.link_random},
{'link': link_user_old}
)
else:
self.create_contracts_bd(
name, key_access, contract, input_value,
date_entries, value_installment,installment_amount,
installment, expire, wallet, company_id
)
self.link_user["error_status"].append('Não')
count_contract_success += 1
self.link_user["key_access"].append(key_access)
self.writer_csv_file(index_count, count_line)
except Exception as e:
print(e)
count_line += 1
self.create_imports_bd(file_name, count_contract_success)
def new_variables(self, row, count_line):
index_value = 9
index_key = 9
if count_line == 1:
if len(row) < 8:
pass
else:
try:
while row[index_key]:
self.key.append(row[index_key])
self.variables[row[index_key]] = ''
index_key += 1
                    # do a replace on the variables pattern
except Exception as e:
print(e)
elif count_line >= 2:
if len(row) < 8:
pass
else:
try:
for i in self.key:
self.variables[i] = row[index_value]
index_value += 1
except Exception as e:
print(e)
    def monetary_format(self, input_value):
        # Normalize monetary strings like "1.234,56" into a float (1234.56).
        if "." in input_value:
            input_value = input_value.replace('.', '')
        if "," in input_value:
            input_value = float(input_value.replace(',', '.'))
        return input_value
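    # Illustrative sketch (assumption): the conversion performed above for a
    # Brazilian-formatted monetary string.
    #
    #   value = "1.234,56".replace('.', '')        # "1234,56"
    #   value = float(value.replace(',', '.'))     # 1234.56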
def create_contracts_bd(self, name, key_access, contract, input_value, date_entries, value_installment,installment_amount, installment,expire, wallet, company_id):
data_contract = {
"full_name": name,
"access_key": key_access,
"contract": contract,
"entry_value": input_value,
"entry_date": date_entries,
"parcels_value": value_installment,
"parcels_quantity": installment_amount,
"parcels_day": installment,
"expire_date": expire,
"variables": self.variables,
"link": self.link_random,
"status": True,
"wallet": wallet,
"company_id": company_id,
"status_type": "Ativo" # Tipos - ativo, atualizado e expirado
}
self.contract.create(data_contract)
def create_imports_bd(self,file_name, count_contract_success):
data_import = {
"user": "Kaic de Lima Oliveira",
"errors": self.errors,
"company_dir": "Kaic",
"links_file": self.file_link,
"original_name": file_name.filename,
"wallet": 5,
"company_id": 1,
"contracts_quantity": count_contract_success,
"file": self.file_import,
"variables": []
}
self.imports.create(data_import)
    def generate_link_random(self):
        # Generate a random 10-letter link token and remember it for the output CSV.
        self.link_random = ''.join(random.choice(string.ascii_letters) for _ in range(10))
        self.link_user["link_random"].append(self.link_random)
def writer_csv_file(self, index_count, count):
        with open('static/downloads/' + self.file_link, 'w', newline='', encoding='utf-8') as new_file:  # Writing the CSV file manually
writer_csv = csv.writer(new_file, delimiter=';')
for i in range(count):
data_writer = [
['chave_de_acesso', 'link_do_usuario', 'erro'],
[
f"{self.link_user['key_access'][index_count]}",
f"{self.link_user['link_random'][index_count]}",
f"{self.link_user['error_status'][index_count]}"
]
]
index_count += 1
writer_csv.writerows(data_writer)
|
aggregator_handler.py
|
import json
import logging
import threading
from datetime import datetime, timedelta
from time import sleep
import pandas as pd
import utils
from constants import *
from sql_writer import SqlWriter
class MessageHandler:
"""
Class that handles the messages
"""
def handle_message(self, ts_reception: datetime, message_str: str):
"""
Handle a message for a message broker
:param ts_reception: the time this message was received
:param message_str: the message payload (json string)
"""
pass
class AggregatorHandler(MessageHandler):
"""
Implementation of MessageHandler that aggregates the messages before writing them into a database
"""
def __init__(self, configuration, sql_writer: SqlWriter):
self.configuration = configuration
self.sql_writer = sql_writer
self.mqtt_delay = timedelta(seconds=int(configuration[MQTT_DELAY]))
self.messages = {}
self.previous_minute = utils.extract_datetime_minute(utils.now())
        # create a thread that will aggregate messages each minute and write them to the database
agg_thread = threading.Thread(target=self._aggregate_messages) # instantiating without any argument
agg_thread.start()
def _aggregate_messages(self):
"""
Runnable that will loop forever to aggregate the messages each minute and write them to the database
:return:
"""
# first time we need to wait to the end of the first minute
self._sleep(utils.next_minute(self.previous_minute))
while True:
if self.previous_minute in self.messages:
# retrieve the message for the previous minute
msg_minute = self.messages.pop(self.previous_minute)
# aggregate messages and write them to the database
self._aggregate_and_write(self.previous_minute, msg_minute)
self.previous_minute = utils.next_minute(self.previous_minute)
self._sleep(self.previous_minute)
def _aggregate_and_write(self, ts_minute: datetime, msg_minute: list):
"""
        Method that aggregates messages and writes them to the database
        :param ts_minute: the minute being aggregated
        :param msg_minute: the messages to aggregate
"""
logging.info("Aggregate data of {} and write in the database".format(str(ts_minute)))
# we use pandas for the data aggregation and sql writing for the sake of simplicity
df = pd.DataFrame(msg_minute, columns=[MACHINE, TS, LOAD_RATE, MILEAGE])
mean_grpby = df.groupby([MACHINE]).mean()
mean_grpby.insert(0, TS, ts_minute)
self.sql_writer.write(mean_grpby)
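    # Illustrative sketch (assumption; MACHINE, LOAD_RATE, MILEAGE and TS are the column-name
    # constants imported from `constants`, and `ts_minute` / the rows are made up for the example):
    # one averaged row per machine, stamped with the minute being aggregated.
    #
    #   rows = [[1, 0.4, 100.0], [1, 0.6, 102.0], [2, 0.2, 50.0]]
    #   df = pd.DataFrame(rows, columns=[MACHINE, LOAD_RATE, MILEAGE])
    #   agg = df.groupby([MACHINE]).mean()
    #   agg.insert(0, TS, ts_minute)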
def _sleep(self, next_minute: datetime):
"""
        Sleep until the next minute plus the configured delay, so that messages delivered late by the MQTT broker are still handled
        :param next_minute: the minute until which to sleep
        """
        sleep_duration = (next_minute - utils.now() + self.mqtt_delay).total_seconds()
        # sleep only if we have time; processing the previous messages could potentially take more than one minute
if sleep_duration > 0:
logging.info("Sleep for {} seconds before next aggregation {}".format(
sleep_duration,
next_minute + self.mqtt_delay))
sleep(sleep_duration)
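    # Illustrative sketch (assumption): the sleep target is the next minute plus the
    # configured MQTT delay, so late messages for that minute can still arrive.
    #
    #   nxt = datetime(2021, 3, 1, 10, 16, 0)
    #   now = datetime(2021, 3, 1, 10, 15, 42, 300000)
    #   (nxt - now + timedelta(seconds=2)).total_seconds()   # 19.7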
def handle_message(self, ts_reception: datetime, message_str: str):
"""
Handle a message and add it to the set of messages to aggregate
:param ts_reception: the time this message was received
:param message_str: the message payload (json string)
"""
message = json.loads(message_str)
p_message = self._process_message(ts_reception, message)
if p_message is not None:
minute = utils.extract_datetime_minute(p_message[1])
if minute not in self.messages:
self.messages[minute] = []
self.messages.get(minute).append(p_message)
def _process_message(self, ts_reception, message):
"""
        Process the message and return the processed message, or None if the message is not
        coherent and should not be processed
:param ts_reception: the reception date time of the message
:param message: the raw message
:return: the processed message
"""
ts = utils.parse_datetime(message[TS]) # transform to a datetime
        # we do not take into account messages whose timestamp is in the future
        # (relative to the reception time); only past messages are handled
if not self.configuration['skip_future_message'] == 'True' or ts_reception >= ts:
# TODO here we should check message integrity (see NOTES.md)
machine = int(message[MACHINE])
load_rate = float(message[LOAD_RATE])
mileage = float(message[MILEAGE])
return [machine, ts, load_rate, mileage]
logging.warning("The following message was ignored as the time stamp is in the future {}".format(message))
|
api.py
|
from flask import Blueprint, session, request, send_file, url_for, Response, abort
from flask_login import login_user, current_user
from io import BytesIO
from uuid import UUID, uuid1
from utilities import SQL, APIResponse
from pyprobe import FFProbe
import threading
import os
import subprocess
import json
import shutil
import re
import pyotp
import datetime
import base64
import glob
import utilities
import html
import hashlib
api = Blueprint('api', __name__, template_folder="templates",
static_folder="static")
"""General function start"""
# Todo: adapt to function "video_to_hls"
def live_to_hls(videoid, streamkey):
root = utilities.abs_path(f"live/{videoid}/")
streamurl = f"rtmp://127.0.0.1:1935/live/{streamkey}"
video_uuid = SQL.select_data(
"`uuid`", "`video`", "videoid=%s", videoid)[0][0]
if not os.path.isdir(root):
os.mkdir(root)
if utilities.config.gpuaccle:
ffmpeg_process_command = f"{utilities.config.ffmpeg_path} -hwaccel cuda -i {streamurl} " + \
f"-c:v copy -c:a copy -b:v 10M -hls_list_size 0 -hls_time 3 {root + video_uuid}.m3u8"
else:
ffmpeg_process_command = f"{utilities.config.ffmpeg_path} -i {streamurl} " + \
f"-c:v copy -c:a copy -b:v 10M -hls_list_size 0 -hls_time 3 {root + video_uuid}.m3u8"
subprocess.call(ffmpeg_process_command, shell=True)
def live_to_video(videoid):
video_uuid = SQL.select_data(
"`uuid`", "`video`", "videoid=%s", videoid)[0][0]
root = utilities.abs_path(f"live/{videoid}/")
try:
os.remove(root + f"{video_uuid}.m3u8")
except (FileNotFoundError, PermissionError):
SQL.delete_data("metadata, video, live",
"videoid=%s", videoid) # todo: test
return False
else:
_, _, files = next(os.walk(root))
with open(f"video/{video_uuid}.m3u8", "w") as f:
f.write(
"#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:2\n#EXT-X-MEDIA-SEQUENCE:0\n")
i = 0
files.sort(key=lambda f: int(re.sub(r'\D', '', f)))
for ts in files:
metadata = FFProbe(root + ts, utilities.config.ffprobe_path)
f.write(
f"#EXTINF:{metadata.streams.video.duration},\n/video/?getvideo={videoid}&seq={i}\n")
shutil.move(root + ts, f"video/{ts}")
i += 1
f.write("#EXT-X-ENDLIST\n#EOF-SHVS")
try:
                os.rmdir(root)  # remove the now-empty live directory (os.remove cannot delete a directory)
except:
pass
finally:
SQL.update_data("metadata", "live=%s", "5", "videoid=%s", videoid)
return True
def delete_all_video_data(videoid, uuid) -> bool:
# Delete from database
SQL.delete_data("`metadata` INNER JOIN `video` ON `metadata`.`videoid`=`video`.`videoid`",
"`metadata`.`videoid`=%s", videoid, "`metadata`, `video`")
SQL.delete_data("interaction", "videoid=%s", videoid)
# Delete from folder
delete_list = [utilities.abs_path(
f'video/{uuid}.m3u8')] + glob.glob(utilities.abs_path(f'video/{uuid}*.ts'))
for item in delete_list:
if os.path.isfile(item):
os.remove(item)
return True
def allowed_ip(ip) -> bool:
if ip not in utilities.banned_ip:
return True
else:
return utilities.cal_time(utilities.banned_ip[request.remote_addr], 8)
"""General function end"""
def return_json_list(aList):
return Response(json.dumps(aList), mimetype='application/json')
@api.route('/api/live/view/<string:videoid>/<string:watchid>', methods=["DELETE"])
def delete_live_view(videoid, watchid):
if watchid in utilities.watch_token:
if videoid == utilities.watch_token[watchid]["video"]:
if request.remote_addr == utilities.watch_token[watchid]["ip"]:
utilities.live_views[videoid]["current"] -= 1
del utilities.watch_token[watchid]
return APIResponse.response_204()
else:
return APIResponse.response_403_api(message=utilities.error_message["token_ip_mismatch"])
else:
return APIResponse.response_200(error="id_does_not_match")
else:
return "Watch token is invalid.", 404
@api.route('/api/video/hit/<string:videoid>/<string:watchid>', methods=["PUT"])
def add_hitrate(videoid, watchid):
if watchid in utilities.watch_token:
if videoid == utilities.watch_token[watchid]["video"]:
if request.remote_addr == utilities.watch_token[watchid]["ip"]:
SQL.update_data("metadata", "views=views + %s",
"1", "videoid=%s", videoid)
del utilities.watch_token[watchid]
return APIResponse.response_200()
else:
return APIResponse.response_403_api(message=utilities.error_message["token_ip_mismatch"])
else:
return APIResponse.response_200(error="id_does_not_match")
else:
return "Watch token is invalid.", 404
@api.route('/api/video/meta/<string:videoid>', methods=["GET", "POST"])
def video_meta(videoid):
video_info = SQL.select_data(
"title, description, privacy, owner, live", "metadata", "videoid=%s", videoid)
if len(video_info) > 0:
try:
is_author = video_info[0][3] == current_user.id
except:
is_author = False
if video_info[0][2] < 2 or is_author:
if videoid in utilities.progress:
if utilities.progress[videoid] > 100:
progress = 100
else:
progress = utilities.progress[videoid]
else:
progress = "$invalid$"
if request.method == "GET":
thumbnail = url_for('api.thumbnail', videoid=videoid)
return APIResponse.response_200(data={"title": video_info[0][0], "description": video_info[0][1], "privacy": video_info[0][2],
"thumbnail": thumbnail, "progress": progress})
elif request.method == "POST":
if is_author:
payload = request.get_json(force=True)
if "title" in payload and "description" in payload and "privacy" in payload:
if utilities.valid_privacy(payload["privacy"]):
if len(payload["description"]) > 512:
description = payload["description"][:512]
else:
description = payload["description"]
description = html.escape(description)
if payload["title"].strip() != "":
if len(payload["title"]) > 50:
title = payload["title"][:50]
else:
title = payload["title"]
title = html.escape(title)
updated = SQL.update_data("metadata", "title=%s, description=%s, privacy=%s", (
title, description, payload["privacy"]), "videoid=%s", videoid)
else:
updated = SQL.update_data("metadata", "description=%s, privacy=%s", (
description, payload["privacy"]), "videoid=%s", videoid)
if videoid in utilities.video_uuid:
utilities.video_uuid[videoid][2] = int(
payload["privacy"])
if updated:
return APIResponse.response_200(message="Information updated successfully", data={"videoid": videoid})
else:
return APIResponse.response_200(error="general_error")
else:
return APIResponse.response_200(error="privacy_value_error")
else:
return APIResponse.response_200(error="empty_field")
else:
return APIResponse.response_403_api()
else:
return APIResponse.response_200(error="not_yet_authenticate")
else:
return "Video does not exist", 404
# Todo integrate two methods
@api.route('/api/live/status', defaults={'videoid': None}, methods=["GET", "PATCH"])
@api.route('/api/live/status/<string:videoid>', methods=["GET", "PATCH"])
def patch_live_status(videoid):
if request.method == "GET" or videoid is not None:
if videoid in utilities.live_status:
return APIResponse.response_200(data={"status": utilities.live_status[videoid]})
else:
return "Live does not exist.", 404
else:
if request.remote_addr == "127.0.0.1":
payload = json.loads(request.get_json(force=True))
if "key" in payload and "status" in payload and "api_key" in payload:
if payload["api_key"] in utilities.api_key:
status = int(payload["status"])
try:
UUID(payload["api_key"]).version == 1
except:
abort(400)
else:
if 0 <= status <= 4:
videoid = SQL.select_data(
"`videoid`", "`live`", "`streamkey`=%s", payload["key"])
if len(videoid) > 0:
if utilities.live_status[videoid[0][0]] == 2:
utilities.live_status[videoid[0]
[0]] = payload["status"]
return APIResponse.response_204()
else:
return APIResponse.response_200(error="live_must_start")
else:
return "Stream key not found", 404
else:
                            return APIResponse.response_403_api()
else:
abort(400)
else:
                return APIResponse.response_403_api()
@api.route('/api/live/onair/<string:videoid>', methods=["PATCH"])
def patch_live_onair(videoid):
if current_user.is_authenticated:
meta = SQL.select_data("owner,live", "metadata", "videoid=%s", videoid)
if len(meta) > 0:
if meta[0][0] == current_user.id:
if utilities.live_status[videoid] > 1 and utilities.live_status[videoid] < 4:
if utilities.live_status[videoid] == 3:
abort(423, description="You must stop the feed first.")
SQL.delete_data("live", "videoid=%s", videoid)
SQL.update_data("metadata", "live=%s",
"4", "videoid=%s", videoid)
del utilities.live_status[videoid]
thread = threading.Thread(
target=live_to_video, kwargs={'videoid': videoid})
thread.start()
return APIResponse.response_200(data={"action": 4})
elif utilities.live_status[videoid] == 1:
utilities.live_status[videoid] = 2
streamkey = SQL.select_data(
"streamkey", "live", "videoid=%s", videoid)[0][0]
os.mkdir(utilities.abs_path(f"live/{videoid}"))
with open(utilities.abs_path(f"live/{videoid}/{streamkey}.m3u8"), "w") as f:
f.write(
"""#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:4\n#EXT-X-MEDIA-SEQUENCE:0\n""")
thread = threading.Thread(target=live_to_hls, kwargs={
"videoid": videoid, "streamkey": streamkey})
thread.start()
return APIResponse.response_200(data={"action": 2})
else:
abort(
422, description="This video id may not belong to a live or the live is ended.")
else:
return APIResponse.response_403_api()
else:
return "The live does not exist.", 404
else:
return APIResponse.response_403_api()
@api.route('/api/video/playlist/<string:videoid>')
def playlist(videoid):
if ".m3u8" in videoid:
videoid = videoid.replace(".m3u8", "")
if videoid.isalnum() and len(videoid) == 10:
if videoid not in utilities.video_uuid:
try:
utilities.video_uuid[videoid] = list(SQL.select_data("`video`.`uuid`, `metadata`.`live`, `metadata`.`privacy`, `metadata`.`owner`, `video`.`m3u8`", "video INNER JOIN " +
"`metadata` ON `video`.`videoid`=`metadata`.`videoid`", "`video`.`videoid`=%s", videoid)[0])
except:
if SQL.select_data("COUNT(*)", "metadata", "videoid=%s", videoid)[0][0] == 1:
return "Stream not found or corrupted, please contact administrators of this website.", 500
else:
return "Video source does not exist.", 404
m3u8_file = utilities.video_uuid[videoid]
if m3u8_file[2] == 2:
if not current_user.is_authenticated or utilities.video_uuid[videoid][3] != (current_user.id or None):
abort(403)
if m3u8_file[1] == 0 or m3u8_file[1] == 5:
if os.path.isfile(m3u8_file[4]):
with open(m3u8_file[4], "rb") as f:
if b"EOF-SHVS" in f.read():
return send_file(utilities.abs_path(m3u8_file[4]), mimetype="application/vnd.apple.mpegurl")
else:
return "Stream not found or corrupted, please contact administrators of this website.", 500
else:
return "Stream not found", 404
else:
m3u8_file = utilities.abs_path(
f"live/{videoid}/{m3u8_file[4].replace('video/', '')}")
if os.path.isfile(m3u8_file):
with open(m3u8_file, "rt") as f:
lines = f.read().splitlines()
video = 0
file_to_send = BytesIO()
for line in lines:
if ".ts" in line:
file_to_send.write(
f"/video/?getvideo={videoid}&seq={video}\n".encode())
video += 1
else:
file_to_send.write(f"{line}\n".encode())
file_to_send.seek(0)
return send_file(file_to_send, as_attachment=True, attachment_filename="temp.m3u8", mimetype="application/vnd.apple.mpegurl")
else:
return "Live stream not found", 404
else:
abort(400)
@api.route('/api/thumbnail/<string:videoid>')
def thumbnail(videoid):
path = utilities.abs_path(f"thumbnail/{videoid}.png")
if os.path.exists(path):
return send_file(path)
else:
return api.send_static_file("images/thumbnail.jpg")
@api.route('/api/progress/<string:videoid>')
def getprogress(videoid):
if videoid in utilities.progress:
if utilities.progress[videoid] > 100:
progress = 100
else:
progress = utilities.progress[videoid]
return APIResponse.response_200(data={"videoid": videoid, "progress": progress})
return APIResponse.response_200(error="not_in_process")
@api.route('/api/account/register', methods=["POST"])
def api_register():
payload = request.get_json(force=True)
if "username" in payload and "password" in payload and "email" in payload and "h-captcha-response" in payload:
if utilities.captcha.verify(payload["h-captcha-response"]):
if not current_user.is_authenticated:
if payload["username"].isalnum():
password = payload["password"]
if len(password) == 96 and password.isalnum():
password = base64.b64encode(password.encode())
hashed_password = utilities.argon2.generate_password_hash(
password)
result, errorcode = SQL.insert_data(
"users", "username, password", "%s, %s", (payload["username"], hashed_password))
if payload["email"] is not None and utilities.email_pattern.match(payload["email"]) is None:
return APIResponse.response_200(error="email_invalid")
if result:
if payload["email"] is not None:
utilities.verify_email(
payload["email"], payload["username"])
return APIResponse.response_200(message="Register complete! Please verify the ownership of the E-mail address provided.", data={"username": payload["username"]})
else:
return APIResponse.response_200(message="Register complete! Please proceed to login.", data={"username": payload["username"]})
else:
if errorcode == 1062:
error = "user_exists"
else:
error = "register_try_again"
message = f"Error occurred when creating a new user, please try again. (Internal Error: {errorcode + 120}, " + \
"please provide this number when you contact administrator)"
return APIResponse.response_200(error=error, message=message)
else:
error = "password_not_hash"
else:
error = "username_violation"
else:
error = "authenticated"
else:
error = "captcha_needed"
else:
abort(400)
return APIResponse.response_200(error=error)
@api.route('/api/account/login', methods=["POST"])
def api_login():
payload = request.get_json(force=True)
if "username" in payload and "password" in payload:
if not current_user.is_authenticated:
if allowed_ip(request.remote_addr):
if payload["username"].isalnum():
if len(payload["password"]) == 96 and payload["password"].isalnum():
username = payload["username"]
password = base64.b64encode(
payload["password"].encode())
account = SQL.select_data(
"`password`, `role`, `otp`, `locked`", "users", "`username`=%s", username)
if account:
goes_incorrect_pw = False
stored_password = account[0][0]
else:
goes_incorrect_pw = True
stored_password = utilities.argon2.generate_password_hash(
os.urandom(30)) # Random fake password
# if bcrypt.checkpw(password, account[0][0]):
if not goes_incorrect_pw and utilities.argon2.check_password_hash(stored_password, password):
if account[0][3] == b"\x00":
if account[0][2] is None:
# session["username"] = username
session["role"] = account[0][1]
user = utilities.User()
user.id = username
# user.role = account[0][1]
login_user(user)
return APIResponse.response_200(data={"username": username, "status": "success"})
else:
session['tempusername'] = username
session['role'] = account[0][1]
return APIResponse.response_200(data={"username": username, "status": "otp"})
else:
error = "account_locked"
else:
goes_incorrect_pw = True
if goes_incorrect_pw:
data = ""
error = "incorrect_password"
if "login_tried" not in session:
session["login_tried"] = 1
data = {"tried": session["login_tried"]}
else:
if session["login_tried"] < 2:
session["login_tried"] += 1
data = {"tried": session["login_tried"]}
else:
del session["login_tried"]
utilities.banned_ip[request.remote_addr] = datetime.datetime.now(
)
try:
utilities.send_email(
username, "Attention!", "failed_pwd", ip=request.remote_addr or "unknown")
except RuntimeError:
pass
return APIResponse.response_200(error=error, data=data)
else:
error = "password_not_hash"
else:
error = "username_violation"
else:
error = "banned_addr"
else:
error = "authenticated"
else:
abort(400)
return APIResponse.response_200(error=error)
@api.route('/api/account/password/forgot', methods=["GET", "POST"])
def forgot_password():
if request.method == "GET":
email = request.args.get("email") # todo: username needed
encoded_email = base64.b64encode(
hashlib.sha3_384(email.encode()).digest())
hashed_email = utilities.argon2.generate_password_hash(encoded_email)
result = SQL.select_data("username", "users", "email=%s", hashed_email)
if result:
token = str(uuid1())
if token not in utilities.reset_password:
utilities.reset_password[token] = {}
utilities.reset_password[token]["email"] = email
utilities.reset_password[token]["time"] = datetime.datetime.now()
utilities.send_email(
result[0][0], "Password reset", "reset_password", email=email, token=token)
return APIResponse.response_204()
else:
return APIResponse.response_204()
else:
payload = request.get_json(force=True)
if "password" in payload and "token" in payload and "email" in payload:
if len(payload["password"]) == 96 and payload["password"].isalnum():
if UUID(payload["token"]).version == 1 and payload["token"] in utilities.reset_password:
if utilities.reset_password[payload["token"]]["email"] == payload["email"]:
if not utilities.cal_time(utilities.reset_password[payload["token"]]["time"], 6):
password = base64.b64encode(
payload["password"].encode())
hashed_password = utilities.argon2.generate_password_hash(
password)
if SQL.update_data("users", "password=%s, locked=b%s", (hashed_password, "0"),
"email=%s", utilities.reset_password[payload["token"]]["email"]):
del utilities.reset_password[payload["token"]]
return APIResponse.response_200()
else:
return APIResponse.response_200(error="database_error")
else:
return APIResponse.response_200(error="token_expired")
else:
return APIResponse.response_200(error="token_email_mismatch")
else:
return APIResponse.response_200(error="token_invalid")
else:
error = "password_not_hash"
else:
abort(400)
return APIResponse.response_200(error=error)
@api.route('/api/account/login/otp', methods=["POST"])
def otp_verify():
if current_user.is_authenticated:
return APIResponse.response_200(error="authenticated")
elif 'tempusername' in session:
payload = request.get_json(force=True)
if "otp" in payload:
if len(payload["otp"]) == 6 and payload["otp"].isdigit():
try:
otp = SQL.select_data(
"`otp`", "users", "`username`=%s", session["tempusername"])[0][0]
except:
return APIResponse.response_200(error="no_otp")
totp = pyotp.TOTP(otp)
if totp.verify(payload["otp"]):
user = utilities.User()
user.id = session["tempusername"]
login_user(user)
del session["tempusername"]
if "otp_tried" in session:
del session["otp_tried"]
return APIResponse.response_200()
else:
data = None
if "otp_tried" not in session:
session["otp_tried"] = 1
data = {"tried": session["otp_tried"]}
else:
if session["otp_tried"] < 2:
session["otp_tried"] += 1
data = {"tried": session["otp_tried"]}
else:
data = {"tried": session["otp_tried"]}
# Lock account
SQL.update_data(
"users", "locked=b%s", "1", "username=%s", session["tempusername"])
del session["otp_tried"]
del session["tempusername"]
return APIResponse.response_200(error="otp_error", data=data)
else:
return APIResponse.response_200(error="otp_error")
else:
abort(400)
else:
return APIResponse.response_403_api()
@api.route('/api/account/', methods=["GET"])
def account():
if current_user.is_authenticated:
return APIResponse.response_200(data={"status": "logged"})
else:
return utilities.error_message["authentication_needed"], 403
@api.route('/api/profile/', methods=["GET"])
def get_profile():
if current_user.is_authenticated:
results = SQL.select_data(
"*", "metadata", "owner=%s ORDER BY date DESC", current_user.id)
        if len(results) <= 0:
            return APIResponse.response_200(message="You do not have any video yet.")
converted_results = []
for result in results:
items = {}
items["videoid"] = result[0]
items["title"] = result[1]
items["description"] = result[2]
items["privacy"] = result[3]
items["processing"] = int.from_bytes(result[4], byteorder='big')
items["live"] = result[6]
items["views"] = result[7]
items["uploadtime"] = int(result[8].timestamp())
converted_results.append(items)
return return_json_list(converted_results)
else:
return APIResponse.response_403_api()
@api.route('/api/search/', methods=["GET"])
def search_api():
if "keyword" in request.args:
keywords = request.args.get("keyword")
if keywords.strip() != "":
keywords = keywords.split()
search_condition = "title LIKE %s"
search_keyword = [f"%{keywords[0]}%"]
            length = len(keywords)
            if length > 1:
                i = 1
                while i < length:
                    search_condition += " OR title LIKE %s"
                    search_keyword.append(f"%{keywords[i]}%")
                    i += 1
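            # e.g. a keyword string "cat video" (hypothetical) builds the condition
            # "title LIKE %s OR title LIKE %s" with parameters ('%cat%', '%video%')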
results = SQL.select_data("`metadata`.videoid, title, description, uuid", "metadata INNER JOIN video " +
"ON `metadata`.`videoid` = `video`.`videoid`", f"privacy=0 AND processing=b'0' AND ({search_condition}) ORDER BY views DESC", tuple(search_keyword))
converted_results = []
for result in results:
items = {}
items["videoid"] = result[0]
items["title"] = result[1]
items["description"] = result[2]
converted_results.append(items)
if len(converted_results) > 0:
return return_json_list(converted_results)
else:
return APIResponse.response_200(error="video_not_found")
abort(400)
@api.route("/api/video/comments/<string:videoid>/", methods=["GET", "POST"])
def comments(videoid):
if request.method == "GET":
data = SQL.select_data("`user`, `comment`", "interaction", "type=%s AND videoid=%s",
(b"1", videoid))
return APIResponse.response_200(data=data)
else:
if current_user.is_authenticated:
payload = request.get_json(force=True)
if "comment" in payload:
comment = html.escape(payload["comment"] if len(
payload["comment"]) <= 500 else payload["comment"][:500])
result = SQL.insert_data("interaction", "videoid, user, comment, type", "%s, %s, %s, b'1'",
(videoid, current_user.id, comment))
if result:
return APIResponse.response_200(message="Comment posted", data={"username": current_user.id})
else:
abort(500, description="Server error")
else:
abort(400)
else:
return utilities.error_message["authentication_needed"], 403
@api.route("/api/video/like/<string:videoid>/", methods=["GET", "PATCH"])
def like(videoid):
if current_user.is_authenticated:
if request.method == "GET":
data = SQL.select_data(
"COUNT(*)", "`interaction`", "`type`=%s AND videoid=%s AND user=%s", ("b'0'", videoid, current_user.id))[0][0]
if data >= 1:
liked = True
else:
liked = False
return APIResponse.response_200(data={"videoid": videoid, "username": current_user.id, "liked": liked})
else:
try:
SQL.select_data("videoid", "metadata",
"videoid=%s", videoid)[0][0]
except:
return "Video not found", 404
result = SQL.select_data(
"user", "`interaction`", "videoid=%s AND type=%s AND user=%s", (videoid, "b'0'", current_user.id))
if len(result) > 0:
SQL.delete_data(
"`interaction`", "user=%s AND videoid=%s AND type=0", (current_user.id, videoid))
SQL.update_data("`interaction`", "`likes`=`likes`-%s", "1",
"videoid=%s AND `user` IS NULL AND `type`=b'0'", videoid)
return APIResponse.response_200()
else:
SQL.insert_data("`interaction`", "`videoid`, `user`, type",
"%s, %s, 0", (videoid, current_user.id))
SQL.update_data("`interaction`", "`likes`=`likes`+%s", "1",
"videoid=%s AND `user` IS NULL AND `type`=b'0'", videoid)
return APIResponse.response_200()
else:
return APIResponse.response_200(error="not_yet_authenticate")
@api.route("/api/video/likes/<string:videoid>/", methods=["GET"])
def likes(videoid):
if request.method == "GET":
data = SQL.select_data("COUNT(*), `metadata`.`privacy`, `metadata`.`owner`", "`interaction` INNER JOIN `metadata` ON `metadata`.`videoid`=`interaction`.`videoid`",
"`type`=%s AND `interaction`.`videoid`=%s", ("b'0'", videoid))
if data:
if data[0][1] >= 2:
if not current_user.is_authenticated or current_user.id != data[0][2]:
return APIResponse.response_403_api()
return APIResponse.response_200(data={"videoid": videoid, "likes": data[0][0]})
else:
return "Video does not exists", 404
@api.route("/api/video/<string:videoid>", methods=["DELETE"])
def delete_video(videoid):
if current_user.is_authenticated:
payload = request.get_json(force=True)
data = SQL.select_data("`metadata`.`videoid`, `metadata`.`owner`, `video`.`uuid`",
"`metadata` INNER JOIN `video` ON `metadata`.videoid=`video`.videoid", "`metadata`.`videoid`=%s", videoid)
if "videoid" in payload and len(payload["videoid"]) == 10 and payload["videoid"].isalnum():
if payload["videoid"] == videoid:
if len(data) > 0:
                    # or current_user.role == 'administrator':
if current_user.id == data[0][1]:
if delete_all_video_data(videoid, data[0][2]):
return APIResponse.response_204()
else:
return APIResponse.response_200(error="unknown")
else:
return APIResponse.response_403_api()
else:
return "Video does not exists", 404
else:
return APIResponse.response_200(error="incorrect_videoid")
else:
abort(400)
else:
return utilities.error_message["authentication_needed"], 403
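# Deleting a video therefore requires the id both in the URL and in the JSON body,
# e.g. DELETE /api/video/ab12cd34ef with body {"videoid": "ab12cd34ef"} (hypothetical id).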
|
catalog_collections.py
|
""" catalog collections
"""
import argparse
import hashlib
import json
import multiprocessing
import os
import re
import subprocess
import sys
from collections import Counter
from collections import OrderedDict
from datetime import datetime
from glob import glob
from json.decoder import JSONDecodeError
from typing import Dict
from typing import List
from typing import Tuple
from ansible.utils.plugin_docs import get_docstring # type: ignore
import yaml
from yaml.error import YAMLError
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader # type: ignore
from key_value_store import KeyValueStore # type: ignore
PROCESSES = (multiprocessing.cpu_count() - 1) or 1
class CollectionCatalog:
# pylint: disable=too-few-public-methods
"""collection cataloger"""
def __init__(self, directories: List[str]):
self._directories = directories
self._collections: OrderedDict[str, Dict] = OrderedDict()
self._errors: List[Dict[str, str]] = []
self._messages: List[str] = []
def _catalog_plugins(self, collection: Dict) -> None:
"""catalog the plugins within a collection"""
path = collection["path"]
file_chksums = {}
file_manifest_file = collection.get("file_manifest_file", {}).get("name")
if file_manifest_file:
fpath = f"{path}/{file_manifest_file}"
if os.path.exists(fpath):
with open(fpath) as read_file:
try:
loaded = json.load(read_file)
file_chksums = {v["name"]: v for v in loaded["files"]}
except (JSONDecodeError, KeyError) as exc:
self._errors.append({"path": fpath, "error": str(exc)})
exempt = ["action", "module_utils", "doc_fragments"]
plugin_dirs = [
(f.name, f.path)
for f in os.scandir(path + "plugins")
if f.is_dir() and f.name not in exempt
]
for plugin_type, path in plugin_dirs:
if plugin_type == "modules":
plugin_type = "module"
for (dirpath, _dirnames, filenames) in os.walk(path):
self._process_plugin_dir(plugin_type, filenames, file_chksums, dirpath, collection)
@staticmethod
def _generate_chksum(file_path: str, relative_path: str) -> Dict:
"""genrate a std checksum for a file"""
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as fhand:
for byte_block in iter(lambda: fhand.read(4096), b""):
sha256_hash.update(byte_block)
res = {
"name": relative_path,
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": sha256_hash.hexdigest(),
"format": 1,
}
return res
def _process_plugin_dir(
self, plugin_type: str, filenames: List, file_chksums: Dict, dirpath: str, collection: Dict
) -> None:
# pylint: disable=too-many-arguments
"""process each plugin within one plugin directory"""
for filename in filenames:
file_path = f"{dirpath}/{filename}"
relative_path = file_path.replace(collection["path"], "")
            _basename, extension = os.path.splitext(filename)
            if not filename.startswith("__") and extension == ".py":
chksum_dict = file_chksums.get(relative_path)
if not chksum_dict:
chksum_dict = self._generate_chksum(file_path, relative_path)
chksum = chksum_dict[f"chksum_{chksum_dict['chksum_type']}"]
collection["plugin_chksums"][chksum] = {"path": relative_path, "type": plugin_type}
def _one_path(self, directory: str) -> None:
"""process the contents of an <...>/ansible_collections/ directory"""
for directory_path in glob(f"{directory}/*/*/"):
manifest_file = f"{directory_path}/MANIFEST.json"
galaxy_file = f"{directory_path}/galaxy.yml"
collection = None
if os.path.exists(manifest_file):
with open(manifest_file) as read_file:
try:
collection = json.load(read_file)
collection["meta_source"] = "MANIFEST.json"
except JSONDecodeError:
error = {
"path": os.path.dirname(manifest_file),
"error": "failed to load MANIFEST.json",
}
self._errors.append(error)
elif os.path.exists(galaxy_file):
with open(galaxy_file) as read_file:
try:
collection = {"collection_info": yaml.load(read_file, Loader=SafeLoader)}
collection["meta_source"] = "galaxy.yml"
except YAMLError:
error = {
"path": os.path.dirname(galaxy_file),
"error": "failed to load galaxy.yml",
}
self._errors.append(error)
if collection:
cname = f"{collection['collection_info']['namespace']}"
cname += f".{collection['collection_info']['name']}"
collection["known_as"] = cname
collection["plugins"] = []
collection["plugin_chksums"] = {}
collection["path"] = directory_path
runtime_file = f"{directory_path}/meta/runtime.yml"
collection["runtime"] = {}
if os.path.exists(runtime_file):
with open(runtime_file) as read_file:
try:
collection["runtime"] = yaml.load(read_file, Loader=SafeLoader)
except YAMLError as exc:
self._errors.append({"path": runtime_file, "error": str(exc)})
self._collections[collection["path"]] = collection
else:
                msg = (
                    f"collection path '{directory_path}' is ignored as it has"
                    " neither a 'MANIFEST.json' nor a 'galaxy.yml' file."
                )
self._messages.append(msg)
def _find_shadows(self) -> None:
"""for each collection, determin which other collections are hiding it"""
collection_list = list(self._collections.values())
counts = Counter([collection["known_as"] for collection in collection_list])
for idx, (cpath, o_collection) in reversed(list(enumerate(self._collections.items()))):
self._collections[cpath]["hidden_by"] = []
if counts[o_collection["known_as"]] > 1:
for i_collection in reversed(collection_list[0:idx]):
if i_collection["known_as"] == o_collection["known_as"]:
self._collections[cpath]["hidden_by"].insert(0, i_collection["path"])
def process_directories(self) -> Tuple[Dict, List]:
"""process each parent directory"""
for directory in self._directories:
collection_directory = f"{directory}/ansible_collections"
if os.path.exists(collection_directory):
self._one_path(collection_directory)
for _cpath, collection in self._collections.items():
self._catalog_plugins(collection)
self._find_shadows()
return self._collections, self._errors
def worker(pending_queue: multiprocessing.Queue, completed_queue: multiprocessing.Queue) -> None:
"""extract a doc from a plugin, place in completed q"""
# pylint: disable=ungrouped-imports
# pylint: disable=import-outside-toplevel
# load the fragment_loader _after_ the path is set
from ansible.plugins.loader import fragment_loader # type: ignore
while True:
entry = pending_queue.get()
if entry is None:
break
collection_name, chksum, plugin_path = entry
try:
(doc, examples, returndocs, metadata) = get_docstring(
filename=plugin_path,
fragment_loader=fragment_loader,
collection_name=collection_name,
)
except Exception as exc: # pylint: disable=broad-except
err_message = f"{type(exc).__name__} (get_docstring): {str(exc)}"
completed_queue.put(("error", (chksum, plugin_path, err_message)))
continue
try:
q_message = {
"plugin": {
"doc": doc,
"examples": examples,
"returndocs": returndocs,
"metadata": metadata,
},
"timestamp": datetime.utcnow().isoformat(),
}
completed_queue.put(("plugin", (chksum, json.dumps(q_message, default=str))))
except JSONDecodeError as exc:
err_message = f"{type(exc).__name__} (json_decode_doc): {str(exc)}"
completed_queue.put(("error", (chksum, plugin_path, err_message)))
def identify_missing(collections: Dict, collection_cache: KeyValueStore) -> Tuple[set, List, int]:
"""identify plugins missing from the cache"""
handled = set()
missing = []
plugin_count = 0
for _cpath, collection in collections.items():
for chksum, details in collection["plugin_chksums"].items():
plugin_count += 1
if chksum not in handled:
if chksum not in collection_cache:
missing.append(
(collection["known_as"], chksum, f"{collection['path']}{details['path']}")
)
handled.add(chksum)
return handled, missing, plugin_count
def parse_args():
"""parse the cli args"""
parser = argparse.ArgumentParser(description="Catalog collections.")
parser.add_argument(
"-d",
dest="dirs",
nargs="+",
help="search withing the specified directories",
default=current_collection_paths,
)
parser.add_argument("-a", dest="adjacent", help="prepended to dirs")
parser.add_argument(
"-c", dest="collection_cache_path", help="path to collection cache", required=True
)
parsed_args = parser.parse_args()
adjacent = vars(parsed_args).get("adjacent")
if adjacent:
directories = [adjacent] + parsed_args.dirs
else:
directories = parsed_args.dirs
directories.extend(reversed(sys.path))
resolved = []
for directory in directories:
realpath = os.path.realpath(directory)
if realpath not in resolved:
resolved.append(realpath)
return parsed_args, resolved
def retrieve_collections_paths() -> Dict:
"""retrieve the currently ser collection paths"""
cmd = ["ansible-config", "dump", "|", "grep", "COLLECTIONS_PATHS"]
proc_out = run_command(cmd)
if "error" in proc_out:
return proc_out
regex = re.compile(r"^(?P<variable>\S+)\((?P<source>.*)\)\s=\s(?P<current>.*)$")
parsed = regex.match(proc_out["stdout"])
if parsed:
try:
current = yaml.load(parsed.groupdict()["current"], Loader=SafeLoader)
return {"result": current}
except (YAMLError, KeyError) as exc:
return {"error": str(exc)}
return {"error": f"corrupt current collection path: {proc_out['stdout']}"}
def retrieve_docs(
collection_cache: KeyValueStore, errors: List, missing: List, stats: Dict
) -> None:
# pylint: disable=too-many-locals
"""extract the docs from the plugins"""
pending_queue = multiprocessing.Manager().Queue()
completed_queue = multiprocessing.Manager().Queue()
processes = []
for _proc in range(PROCESSES):
proc = multiprocessing.Process(target=worker, args=(pending_queue, completed_queue))
processes.append(proc)
proc.start()
for entry in missing:
pending_queue.put(entry)
for _proc in range(PROCESSES):
pending_queue.put(None)
for proc in processes:
proc.join()
while not completed_queue.empty():
message_type, message = completed_queue.get()
if message_type == "plugin":
chksum, plugin = message
collection_cache[chksum] = plugin
stats["cache_added_success"] += 1
elif message_type == "error":
chksum, plugin_path, error = message
collection_cache[chksum] = json.dumps({"error": error})
errors.append({"path": plugin_path, "error": error})
stats["cache_added_errors"] += 1
def run_command(cmd: List) -> Dict:
"""run a command"""
try:
proc_out = subprocess.run(
" ".join(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
universal_newlines=True,
shell=True,
)
return {"stdout": proc_out.stdout}
except subprocess.CalledProcessError as exc:
return {"error": str(exc)}
def main() -> Dict:
"""main"""
stats = {}
stats["cache_added_success"] = 0
stats["cache_added_errors"] = 0
cc_obj = CollectionCatalog(directories=parent_directories)
collections, errors = cc_obj.process_directories()
stats["collection_count"] = len(collections)
collection_cache_path = os.path.abspath(os.path.expanduser(args.collection_cache_path))
collection_cache = KeyValueStore(collection_cache_path)
handled, missing, plugin_count = identify_missing(collections, collection_cache)
stats["plugin_count"] = plugin_count
stats["unique plugins"] = len(handled)
stats["processed"] = len(missing)
if missing:
retrieve_docs(collection_cache, errors, missing, stats)
cached_chksums = collection_cache.keys()
stats["cache_length"] = len(collection_cache.keys())
for _cpath, collection in collections.items():
for no_doc in set(collection["plugin_chksums"].keys()) - set(cached_chksums):
del collection["plugin_chksums"][no_doc]
collection_cache.close()
return {
"collections": collections,
"errors": errors,
"stats": stats,
"messages": cc_obj._messages,
}
if __name__ == "__main__":
start_time = datetime.now()
collection_paths = retrieve_collections_paths()
if "error" in collection_paths:
sys.exit(collection_paths["error"])
else:
current_collection_paths = collection_paths["result"]
args, parent_directories = parse_args()
collection_scan_paths = ":".join(parent_directories)
os.environ["ANSIBLE_COLLECTIONS_PATHS"] = collection_scan_paths
result = main()
result["stats"]["duration"] = (datetime.now() - start_time).total_seconds()
result["collection_scan_paths"] = collection_scan_paths
print(json.dumps(result, default=str))
|
sensor.py
|
# coding=utf-8
import subprocess
import re, os, sys, pdb, time
topo_path = os.path.abspath(os.path.join('..', '..', 'Topology'))
sys.path.insert(0, topo_path)
from util import *
from create_merge_topo import *
from build_virtual_topo import *
import logging
from threading import Lock
from datetime import datetime
class sensor(object):
lock=Lock()
def __init__(self, id, nh, net, config_file, hosts=[], active = True, passive = True, full = False, ignored_ips=[],
known_ips = [], max_fail = 5, simulation=False, readmit=True, verbose=False, interactive=False,
include_hosts=False, asid=None, msid=None, intf = 'any', clean_cmd = [], sleep_time = 10,
subnets = '192.168.0.0/16', nrr=True, pattern='', **pattern_params):
'''
:param id: the id of this sensor. It must be the ID of the node on which the sensor is placed.
:param nh: the number of hosts (monitors) to be used when they are chosen randomly to run iTop(if hosts=[])
:param net: Mininet network reference
:param config_file: Configuration file for the blockchain client run by this sensor
:param hosts: the list of hosts (monitors) to be used to run iTop
:param active: tell whether the sensor should have active capabilities (e.g. ping)
:param passive: tell whether the sensor should have passive capabilities (sniff traffic)
:param full: if False, run iTop with reduced monitors selection; else run iTop with all available monitors
:param ignored_ips: list of ips that the sensor should ignore (ex.: the hosts that do not belong to topology)
:param known_ips: list of known ips. Could even start a sensor with empty list
        :param max_fail: maximum number of consecutive PING failures that is tolerated before declaring a node dead
:param simulation: True if the sensor has to be run on a Mininet host. The active sensor capabilities change.
:param readmit: If set to False, an ip will never be readmitted in the topology after it has been declared
'dead'. Set to False when the sensor is used in Mininet. Default: 'True'.
:param verbose: Set True if you wish to print verbose information
:param interactive: If the sensor is run in interactive mode, the stop signal can be sent via user input.
:param include_hosts: When collecting traces, consider also final hosts
:param asid: Active Sensor ID. Used when the simulation is run on Mininet. In that case, the active and the
passive sensor capabilities may be run on two different hosts. This happens if asid is different from None.
:param msid: Monitor Sensor ID. Used when the simulation is run on Mininet. In that case, the sensor uses a
third, dedicated host to run the topology inference algorithm (only one command at a time can be run on
Mininet hosts..)
:param intf: Capture interface. Default: any.
:param clean_cmd: List of commands to be executed by this sensor to clean previously generated traces
:param sleep_time: time to wait before the next cycle of alive/dead nodes checks is started from the sensor
        :param subnets: The network or combination of networks to which packet sniffing is restricted (only applies if simulation=True)
:param nrr: Set to False if you want to skip Non Responding Routers (NRR) from the topology. The transactions
are rearranged to reflect this modification to the topology.
:param pattern: The pattern to which the induced topology should fit
:param **pattern_params: The parameters passed to manage the specific pattern
'''
setup_logger('sensor ' + id + '.log', 'sensor ' + id + '.log')
self.__logger = logging.getLogger('sensor ' + id + '.log')
self.__id = id
self.__active = active
self.__passive = passive
self.__full = full
self.__known_ips = list(known_ips)
self.__fail_count = {} # Counts the number of unsuccessful, consecutive ping replies for each known ip
for ip in known_ips:
self.__fail_count[ip] = 0
self.__max_fail = max_fail
self.__end = False # Flag to tell sensor to end
self.__ended = False # Flag set to True when the sensor really ended
self.__simulation = simulation
self.__verbose = verbose
self.__readmit = readmit
self.__dead = [] # Updated when found a dead node. Set empty soon after the dead node has been managed.
self.__banned = ignored_ips
self.__new = [] # Updated when found a new node. Set empty soon after the new node has been managed.
self.__nh = nh
self.__hosts = hosts
self.__net = net
self.__interactive = interactive
self.__with_hosts = include_hosts
self.__asid = asid if asid is not None else id
self.__msid = msid # If msid is different from None, run iTop using only this (monitor) sensor as source
self.__alias = create_alias()
self.__intf = intf
self.__clean_cmd = clean_cmd
self.__intercycles_time = sleep_time
self.__subnets = subnets
self.__nrr = nrr
self.__pattern = pattern
self.__pattern_params = pattern_params
self.__c = configure_client(config_file)
register_client(self.__c)
#self.impose_pattern() #TODO
    #TODO it would be better to have synchronized methods for writing to shared data structures
def start(self):
''' Starts the sensor.'''
if self.__passive:
if self.__simulation:
threading.Thread(target=self.passive_sensor_on_Mininet).start()
else:
threading.Thread(target=self.passive_sensor).start()
if self.__active:
if self.__simulation:
threading.Thread(target=self.active_sensor_on_mininet).start()
else:
threading.Thread(target=self.active_sensor).start()
threading.Thread(target=self.run).start()
if self.__interactive: threading.Thread(target=self.wait_user_input).start()
def run(self):
while not self.__end:
if self.__active:
self.check_dead_nodes()
if self.__passive:
self.check_new_nodes()
time.sleep(self.__intercycles_time)
self.__ended = True
def active_sensor(self):
'''Runs active sensor capabilities'''
while not self.__end:
for ip in self.__known_ips:
try:
p = subprocess.Popen(['ping', '-c', '1', '-W', '1', ip], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
result = p.returncode
if result != 0: # Not received correct reply
self.handle_unsuccessful_ping(ip)
else:
self.__fail_count[ip] = 0
except subprocess.CalledProcessError:
print 'Error with the ping subprocess'
time.sleep(5)
def active_sensor_on_mininet(self):
'''Runs active sensor capabilities on a Mininet host'''
while not self.__end:
for ip in self.__known_ips:
s = self.__net[self.__asid]
result = s.cmd('ping -c 1 -W 1 ' + ip + ' | grep received | awk \'{print $4}\'') #TODO cmd and not sendCmd
                if self.__verbose: print 'PING ' + s.IP() + ' -> ' + ip + ' : ' + result.rstrip() + '/1 packets received correctly\n'
try:
if int(result) != 1: # Not received the correct packet back
self.handle_unsuccessful_ping(ip)
else:
self.__fail_count[ip] = 0
if self.__verbose: print '\nAfter Success, Fail count for ' + ip + '= ' + str(self.__fail_count[ip])
except ValueError:
self.handle_unsuccessful_ping(ip)
time.sleep(5)
def handle_unsuccessful_ping(self, ip):
#pdb.set_trace()
try:
self.__fail_count[ip] = self.__fail_count[ip] + 1
if self.__verbose: print '\n' + self.__msid + ' : Fail count for ' + ip + ' = ' + str(self.__fail_count[ip])
if self.__fail_count[ip] > self.__max_fail:
self.__dead.append(ip)
if not self.__readmit:
self.__banned.append(ip)
print '\nBanned ' + ip
self.__known_ips.remove(ip)
del self.__fail_count[ip]
except KeyError:
self.__logger.info('Key error due to ip ' + ip)
def passive_sensor_on_Mininet(self):
'''Runs passive sensor capabilities'''
        # Restrict sniffing to the subnet when the topology is simulated on Mininet. Problem: tcpdump cannot filter on both dst and dst network
tcpdump_cmd = 'sudo timeout 30 tcpdump -l -i ' + self.__intf + ' net ' + self.__subnets + ' >> tcpdump_out'+ self.__id if self.__simulation \
else 'sudo timeout 30 tcpdump -l -i ' + self.__intf + ' >> tcpdump_out'+ self.__id #TODO TWEAK TIMEOUT '-i', self.__id + '-eth0
s = self.__net[self.__id]
threading.Thread(target=self.blocking_cmd_on_Mininet_host, args=(tcpdump_cmd, s,)).start()
time.sleep(3)
with open('tcpdump_out'+self.__id, 'r') as file:
while not self.__end:
line = file.readline()
while line != '': # Sensor wrote something there..
try:
src_ip = line.split()[2]
#dst_ip = line.split()[4]
s_match = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", src_ip)
#d_match = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", dst_ip)
if s_match:
self.handle_match(self.clean_ip(src_ip))
#if d_match:
# self.handle_match(self.clean_ip(dst_ip))
except IndexError:
pass # 'Invalid output from tcpdump'
line = file.readline()
time.sleep(2)
    #TODO it seems that issuing 2 commands to the same host causes an error
def blocking_cmd_on_Mininet_host(self, command, host):
'''
Runs a blocking command (with a TIMEOUT) on a Mininet host using a separate thread of execution.
Must use if the non blocking sendCmd() causes assertion error.
'''
self.__logger.info('Cmd: ' + command)
while not self.__end:
host.cmd(command)
print '\n\nEXITING PASSIVE THREAD\n\n'
def passive_sensor(self):
'''Runs passive sensor capabilities'''
        # Restrict sniffing to the subnet when the topology is simulated on Mininet. Problem: tcpdump cannot filter on both dst and dst network
cmd = ['sudo', 'tcpdump', '-l', '-i', 'any', 'net', '192.168.0.0/16'] if self.__simulation \
else ['sudo', 'tcpdump', '-l', '-i', 'any']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
print '------------------- START SNIFFING ----------------------'
for row in iter(p.stdout.readline, b''):
try:
src_ip = row.split()[2]
                #dst_ip = row.split()[4] #TODO destination IP removed: that host may not exist or be reachable
s_match = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", src_ip)
#d_match = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", dst_ip)
if s_match:
self.handle_match(self.clean_ip(src_ip))
#if d_match:
# self.handle_match(self.clean_ip(dst_ip))
except IndexError:
print 'Invalid output from tcpdump'
if self.__end:
p.terminate()
break
def handle_match(self, ip):
if ip not in self.__banned:
if ip not in self.__known_ips:
# Further check, only in case of simulation (limited to subnet 192.168)
                #TODO the if below was used in previous tests, check whether it is still needed
#if (not self.__simulation or (int(ip.split('.')[0])==192 and int(ip.split('.')[1])==168)):
print '\nNew IP discovered: ' + ip
self.__new.append(ip)
self.__known_ips.append(ip)
self.__fail_count[ip] = 0
self.__logger.info(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' : New IP discovered by ' + self.__id + ' : ' + ip)
    #TODO The passive sensor could be used to reset the fail count: it is sufficient to move the last line
    # of the function after the second if (not inside it). This functionality is currently disabled because
    # Mininet has switches that send IP packets but do not respond to ping, and those definitely should not
    # belong to the topology.
def check_new_nodes(self):
'''Checks whether the passive sensor found traffic dealing with a new, unknown host.
In such a case, run a new instance of iTop and update the topology in the ledger.'''
new = list(self.__new)
if (len(new)) != 0:
res = False
if (not self.__full) and (not self.__with_hosts): # fastiTop was tested to work only with routers
(res, out) = self.fastiTop(self.__new)
if self.__full or res == False: # if fastiTop is not run it is guaranteed that full iTop is run
out = self.fulliTop()
topo = get_topo_from_json(out)
trans = get_transactions_from_topo(topo) if self.__nrr else create_transactions_for_compacted_topo(topo)
self.__c.send_transactions(trans)
self.__new = list(set(self.__new) - set(new)) # In the meantime some new IP could have arrived..
def check_dead_nodes(self):
'''Checks whether the active sensor discovered a dead node (ip address non responding to ping for more
than max times). In that case, tell the Blockchain servers that such a node no longer exists.'''
trans = []
        for n in list(self.__dead):  # iterate over a copy; entries are removed inside the loop
print '\nDead node: ' + n + '\n'
try:
tx = transaction(topology_node(self.__alias[n], 'R'), None, False)
trans.append(tx)
except KeyError:
print '\n' + n + ' does not belong to the topology\n' # Only because we are in a simulation
self.__dead.remove(n)
if len(trans) > 0:
self.__c.send_transactions(trans)
self.__dead = []
self.__full = True # Previously gathered traces are no longer valid -> a full run of iTop is required
def clean_ip(self, raw_ip):
'Clean the ip. A slightly different cleaning is done based on whether the ip is source or destination.'
#bytes = raw_ip.split('.')
bytes = re.split('\.|:', raw_ip)
return bytes[0] + '.' + bytes[1] + '.' + bytes[2] + '.' + bytes[3]
def wait_user_input(self):
while not self.__end:
            #TODO if user input to stop is received, stop the sensor. Improve the stop interface
choice = raw_input("Type 'Q' to quit the sensor.\n")
if choice =='Q' or choice == 'q':
self.stop()
def fulliTop(self):
'''
Runs iTop on the existing topology with all available monitors and returns the filename of the induced topology.
'''
# If a simulation involves final hosts, the commands on the hosts must be executed sequentially
if self.__simulation and self.__with_hosts: sensor.lock.acquire()
self.clean_traces()
self.__logger.info('Run full iTop')
hosts = self.__hosts
if len(self.__hosts) == 0:
hosts = get_hosts(int(self.__nh))
if self.__with_hosts: hosts = self.ips_alias(hosts)
self.__logger.info('full iTop hosts: ')
for h in hosts:
self.__logger.info(h)
        create_traces(self.__net, hosts, src_hosts=[self.__msid], suffix=self.__msid+'/') if self.__msid is not None else \
            create_traces(self.__net, hosts, suffix=self.__msid+'/') # TODO: create_traces was not called here before
if self.__msid is not None:
(vtopo, traces) = create_virtual_topo_and_traces(self.__alias, hosts, src_hosts=[self.__msid], include_host=self.__with_hosts, suffix=self.__msid+'/')
else:
(vtopo, traces) = create_virtual_topo_and_traces(self.__alias, hosts, include_host=self.__with_hosts, suffix=self.__msid+'/')
(M, C) = create_merge_options(vtopo, traces)
(M, mtopo) = create_merge_topology(M, vtopo, C)
out = write_topo_to_file(self.__id, mtopo, hosts)
self.__full = False
if sensor.lock.locked(): sensor.lock.release()
return out
def fastiTop(self, nnodes):
'''
Runs iTop with a reduced set of hosts, based on previous inferred topology.
:return: a pair (result, topology_filename). result is True if the inferred topology is considered satisfying,
False if a full run of iTop has instead to be done. (Some already known nodes were not discovered..)
'''
if self.__simulation and self.__with_hosts: sensor.lock.acquire()
hosts = previous_hosts()
alias = create_alias()
# Reconstruct old topology
#src_hosts = [self.__msid] if self.__msid is not None else []
(M, old_topo, traces) = get_old_topo(hosts, alias) #, src_hosts = src_hosts, include_host=self.__with_hosts)
# Reconstruct new_topo
src_pairs = find_sources(traces,
monitors=True) # Returns a pair because the monitor does not belong to topology
C = compute_monitors_coverage(src_pairs, old_topo, hosts)
shosts = monitors_selection(C, old_topo)
# Infer the (new) topology using the (optimal ?) selection of probing stations
compute_distances(self.__net, hosts, src_hosts=shosts)
create_traces(self.__net, hosts, src_hosts=shosts, suffix=self.__msid+'/')
(vtopo, traces) = create_virtual_topo_and_traces(alias, hosts, src_hosts=shosts, suffix=self.__msid+'/')
'''
if self.__msid is not None: #TODO CHECK!!!
(vtopo, traces) = create_virtual_topo_and_traces(self.__alias, hosts, src_hosts=[self.__msid], include_host=self.__with_hosts)
else:
(vtopo, traces) = create_virtual_topo_and_traces(self.__alias, hosts, include_host=self.__with_hosts)
'''
(M, C) = create_merge_options(vtopo, traces)
(M, new_topo) = create_merge_topology(M, vtopo, C)
if sensor.lock.locked(): sensor.lock.release() #Run of iTop ended
# Compare the old topology against the new one, to see if the gathered traces are enough or all monitors must be used.
nnalias = []
for n in nnodes:
try:
nnalias.append(alias[n])
except KeyError:
print '\n' + n + ' does not belong to the topology, its alias does not exist\n'
end = compare_topologies(old_topo, new_topo, new_nodes = nnalias)
if end:
out = write_topo_to_file(self.__id, new_topo, shosts)
self.__logger.info('Fast iTop successfully used')
return (True, out)
return (False, None)
def impose_pattern(self):
'''
Generates the pattern-specific transactions and sends them to the Blockchain
'''
trans = self.pattern_transactions()
if len(trans) > 0: self.__c.send_transactions(trans)
def pattern_transactions(self):
'''
Returns a list of transactions to be sent to Blockchain nodes to fit the induced topology to the pattern
:return: List of transactions
'''
if self.__pattern == 'tree':
return self.tree_pattern_transactions()
else: #TODO each different pattern has a specific handler. Put here a series of 'elif'...
return []
def tree_pattern_transactions(self):
'''
Returns the pair of transactions needed to fit the induced topology to a tree topology:
Root -> Child and Child -> Root.
If Root_IP is specified, the transactions are added only if Root_IP is not a known host, else if Root_IP
is not specified the transactions are added anyway.
If child_IP is specified, the child node is the one specified, otherwise we assume this sensor to be the
child of the root.
:return: List of two transactions
'''
        #TODO: check what happens if one sensor 'finds' the root and another one does not
# After retrieving the 'names' of the nodes, we insert in the Blockchain the transactions Root -> Child and Child -> Root
root_IP = self.__pattern_params['root_IP'] if 'root_IP' in self.__pattern_params else ''
child_IP = self.__pattern_params['child_IP'] if 'child_IP' in self.__pattern_params else ''
        if root_IP != '':
if root_IP in self.__known_ips:
return [] # Nothing to do, we already discovered the root
else:
alias = create_alias()
root = topology_node(alias[root_IP], 'P') if root_IP in alias else topology_node('ROOT', 'P')
else:
root = topology_node('ROOT', 'P')
        if child_IP != '':
alias = create_alias()
if child_IP in self.__known_ips:
child = topology_node(alias[child_IP], 'R') if child_IP in alias else topology_node(self.__msid, 'R')
else:
child = topology_node(alias[child_IP], 'P') if child_IP in alias else topology_node(self.__msid, 'R')
else:
child = topology_node(self.__msid, 'R')
trx1 = transaction(child, root)
trx2 = transaction(root, child)
return [trx1, trx2]
def ips_alias(self, hosts):
'''
Scans the known_ips + host list to create a list of alias. If an IP has no known alias, it does not insert that
IP in the returned list.
'''
alias = create_alias()
shosts = set()
for ip in self.__known_ips:
try:
a = alias[ip]
shosts.add(a)
except KeyError:
pass # If an IP has no alias, we simply do not return it
        shosts.update(hosts) # Hosts are already aliased; update() adds them in place (union() alone would discard the result)
return list(shosts)
def clean_traces(self):
'''
Runs commands to clean the previously generated traces.
'''
if self.__clean_cmd == []:
os.system('./init.sh') # Clean traces: use default script
else:
for c in self.__clean_cmd:
os.system(c)
    #TODO is this the right way to stop it? Consider whether shared variables need to be protected by a lock
def stop(self):
        # TODO the pattern is imposed before stopping the sensor only for simulation convenience; it could be done at any time
        #if self.__pattern != '': self.impose_pattern()
self.__end = True
def wait_end(self):
while not self.__ended:
time.sleep(5)
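# Minimal usage sketch (hypothetical node id and config path; assumes a running Mininet network `net`):
#   s = sensor('r1', nh=3, net=net, config_file='client.conf', simulation=True, verbose=True)
#   s.start()      # spawns the passive/active threads and the control loop
#   ...            # let the sensor monitor the topology
#   s.stop()
#   s.wait_end()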
|
reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import six
import threading
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
__all__ = ['PyReader']
def _convert_places(places):
if not isinstance(places, (list, tuple)):
places = [places]
ret = []
for p in places:
if not isinstance(p, core.Place):
tmp = core.Place()
tmp.set_place(p)
p = tmp
ret.append(p)
return ret
class PyReader(object):
"""
Create a reader object for data feeding in Python.
    Data is prefetched by a Python thread and pushed into a queue
    asynchronously. Data in the queue is extracted automatically when
    `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader object.
use_double_buffer (bool): whether to use double_buffer_reader to
speed up data feeding.
iterable (bool): whether the created reader object is iterable.
Returns:
reader (Reader): the created reader object.
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
inserted into the program. User should call :code:`start()`
before each epoch and catch :code:`fluid.core.EOFException`
thrown by :code:`Executor.run()` when epoch ends. Once the
exception is caught, user should call :code:`reset()` to reset
the reader manually.
.. code-block:: python
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
# definition of network is omitted
executor = fluid.Executor(fluid.CUDAPlace(0))
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
2. If iterable=True, the created PyReader object is decoupled with
the program. No operator would be inserted into the program.
In this case, the created reader is a Python generator, which
is iterable. User should feed the data yielded from PyReader
object into :code:`Executor.run(feed=...)`.
.. code-block:: python
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]),
return reader
image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=True)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CUDAPlace(0))
# definition of network is omitted
executor = fluid.Executor(fluid.CUDAPlace(0))
executor.run(fluid.default_main_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data)
"""
unique_name_generator = UniqueNameGenerator()
def __init__(self,
feed_list,
capacity,
use_double_buffer=True,
iterable=False):
self._tensor_reader = None
self._thread = None
self._iterable = iterable
self._use_double_buffer = use_double_buffer
self._capacity = capacity
self._feed_list = feed_list
if not self._iterable:
self._init_non_iterable()
def _init_iterable(self, places):
self._var_names = [v.name for v in self._feed_list]
self._places = _convert_places(places)
self._queue = core.init_lod_tensor_blocking_queue(core.Variable(),
self._capacity)
self._reader = core.create_py_reader(
self.queue, self._var_names, self._places, self._use_double_buffer)
def _init_non_iterable(self):
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
queue_name = PyReader.unique_name_generator('lod_tensor_blocking_queue')
reader_name = PyReader.unique_name_generator('create_py_reader')
double_buffer_name = PyReader.unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=reader_name)
startup_blk.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), startup_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __call__(self):
assert self.iterable, "PyReader is not iterable"
assert self._tensor_reader is not None, \
"Data source of PyReader has not set yet"
class Iterator(object):
def __init__(self, reader):
self._reader = reader._reader
self._reset = reader._reset
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
ret = self._reader.read_next()
if ret:
return ret
else:
self._reset()
raise StopIteration
self._start()
return Iterator(self)
def _reset(self):
self._reader.reset()
self._thread.join()
def start(self):
'''
Start the data feeding thread.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CUDAPlace(0))
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
assert not self._iterable, "start() cannot be called when PyReader is iterable"
self._start()
def reset(self):
'''
Reset the reader object when :code:`fluid.core.EOFException` raises.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CUDAPlace(0))
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
assert not self._iterable, "reset() cannot be called when PyReader is iterable"
self._reset()
def _start(self):
def __thread_main__():
try:
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
except Exception as ex:
self._queue.close()
raise ex
self._thread = threading.Thread(target=__thread_main__)
self._thread.daemon = True
self._thread.start()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
If all inputs have no lods, this method is faster than
:code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int32')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CUDAPlace(0)])
# definition of network is omitted
executor = fluid.Executor(fluid.CUDAPlace(0))
executor.run(fluid.default_main_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data)
'''
assert batch_size > 0, "batch_size must be larger than 0"
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
if has_lod:
self.decorate_sample_list_generator(
paddle.batch(
sample_generator,
batch_size=batch_size,
drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=sample_generator,
drop_last=drop_last)
self.decorate_batch_generator(reader, places=places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields list(numpy.ndarray) typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int32')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CUDAPlace(0))
# definition of network is omitted
executor = fluid.Executor(fluid.core.CUDAPlace(0))
executor.run(fluid.default_main_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data)
'''
assert self._tensor_reader is None, \
"Cannot reset the data source of PyReader"
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.decorate_batch_generator(__tensor_reader_impl__, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
yield batch_image, batch_label
return generator
image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int32')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CUDAPlace(0))
# definition of network is omitted
executor = fluid.Executor(fluid.CUDAPlace(0))
executor.run(fluid.default_main_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data)
'''
assert self._tensor_reader is None, \
"Cannot reset the data source of PyReader"
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when py_reader is iterable"
self._init_iterable(places)
|
22.thread_locks_as_contextManagers.py
|
import logging
import threading
import time
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
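# A threading.Lock works as a context manager: "with lock:" acquires the lock on
# entry and releases it on exit, equivalent to acquire() followed by release() in
# a try/finally block, as the two workers below illustrate.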
def worker_with(lock):
with lock:
logging.debug("lock acquired")
def worker_nowith_lock(lock):
    lock.acquire()
    try:
        logging.debug("lock acquired directly")
    finally:
        lock.release()
        logging.debug("lock released")
lock = threading.Lock()
w = threading.Thread(target=worker_with, args=(lock,))
nw = threading.Thread(target=worker_nowith_lock, args=(lock,))
w.start()
nw.start()
|
FederationService.py
|
#! /usr/local/bin/python2.7
# -*- coding: utf-8 -*-
#
#This software was developed by employees of the National Institute of
#Standards and Technology (NIST), and others.
#This software has been contributed to the public domain.
#Pursuant to title 15 United States Code Section 105, works of NIST
#employees are not subject to copyright protection in the United States
#and are considered to be in the public domain.
#As a result, a formal license is not needed to use this software.
#
#This software is provided "AS IS."
#NIST MAKES NO WARRANTY OF ANY KIND, EXPRESS, IMPLIED
#OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTY OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT
#AND DATA ACCURACY. NIST does not warrant or make any representations
#regarding the use of the software or the results thereof, including but
#not limited to the correctness, accuracy, reliability or usefulness of
#this software.
import Bootstrap
Bootstrap.setPath()
sbHome = Bootstrap.getSpectrumBrowserHome()
from flask import Flask, request, abort
from flask import jsonify
import PeerConnectionManager
import util
import argparse
import signal
import Log
import os
import Config
import authentication
import json
import sys
import traceback
import GetLocationInfo
from gevent import pywsgi
from multiprocessing import Process
import time
import pwd
app = Flask(__name__, static_url_path="")
app.static_folder = sbHome + "/flask/static"
@app.route("/federated/peerSignIn/<peerServerId>/<peerKey>", methods=["POST"])
def peerSignIn(peerServerId, peerKey):
"""
Handle authentication request from federated peer and send our location information.
"""
try:
if not Config.isConfigured():
util.debugPrint("Please configure system")
abort(500)
util.debugPrint("peerSignIn " + peerServerId + "/" + peerKey)
rc = authentication.authenticatePeer(peerServerId, peerKey)
# successfully authenticated? if so, return the location info for ALL
# sensors.
util.debugPrint("Status: " + str(rc))
retval = {}
if rc:
requestStr = request.data
if requestStr is not None:
jsonData = json.loads(requestStr)
Config.getPeers()
protocol = Config.getAccessProtocol()
peerUrl = protocol + "//" + jsonData["HostName"] + ":" + str(
jsonData["PublicPort"])
PeerConnectionManager.setPeerUrl(peerServerId, peerUrl)
PeerConnectionManager.setPeerSystemAndLocationInfo(
peerUrl, jsonData["locationInfo"])
retval["status"] = "OK"
retval["HostName"] = Config.getHostName()
retval["Port"] = Config.getPublicPort()
if not Config.isAuthenticationRequired():
locationInfo = GetLocationInfo.getLocationInfo()
retval["locationInfo"] = locationInfo
return jsonify(retval)
else:
retval["status"] = "NOK"
return jsonify(retval)
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace(sys.exc_info())
raise
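# The peer is expected to POST a JSON body such as (hypothetical values):
#   {"HostName": "peer.example.org", "PublicPort": 8000, "locationInfo": {...}}
# On success the reply carries our HostName/Port and, when authentication is not
# required, the location info of all local sensors.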
def signal_handler(signo, frame):
global jobs
print('Federation Server: Caught signal! Exiting.')
for job in jobs:
os.kill(job, signal.SIGINT)
time.sleep(1)
os.kill(job, signal.SIGKILL)
sys.exit(0)
os._exit(0)
if __name__ == '__main__':
global jobs
jobs = []
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGHUP, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument("--pidfile", default=".federation.pid")
parser.add_argument("--logfile", default="/var/log/federation.log")
parser.add_argument("--username", default="spectrumbrowser")
parser.add_argument("--groupname", default="spectrumbrowser")
parser.add_argument("--daemon", default="True")
args = parser.parse_args()
isDaemon = args.daemon == "True"
global pidfile
pidfile = args.pidfile
if isDaemon:
import daemon
import daemon.pidfile
context = daemon.DaemonContext()
context.stdin = sys.stdin
context.stderr = open(args.logfile, 'a')
context.stdout = open(args.logfile, 'a')
context.uid = pwd.getpwnam(args.username).pw_uid
context.gid = pwd.getpwnam(args.groupname).pw_gid
print "Starting federation service"
Log.configureLogging("federation")
# There is a race condition here but it will do for us.
if os.path.exists(args.pidfile):
pid = open(args.pidfile).read()
try:
os.kill(int(pid), 0)
print "svc is running -- not starting"
sys.exit(-1)
os._exit(-1)
except:
print "removing pidfile and starting"
os.remove(args.pidfile)
context.pidfile = daemon.pidfile.TimeoutPIDLockFile(args.pidfile)
with context:
proc = Process(target=PeerConnectionManager.start)
proc.start()
jobs.append(proc.pid)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['CORS_HEADERS'] = 'Content-Type'
Log.loadGwtSymbolMap()
app.debug = True
server = pywsgi.WSGIServer(('localhost', 8002), app)
server.serve_forever()
else:
print "Starting federation service"
with util.pidfile(pidfile):
Log.configureLogging("federation")
proc = Process(target=PeerConnectionManager.start)
proc.start()
jobs.append(proc.pid)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['CORS_HEADERS'] = 'Content-Type'
Log.loadGwtSymbolMap()
app.debug = True
server = pywsgi.WSGIServer(('localhost', 8002), app)
server.serve_forever()
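# Illustrative launch sketch (not part of the service); the script name and
# paths below are hypothetical placeholders for the argparse options defined
# above:
#   python federationService.py --daemon True \
#       --pidfile /var/run/federation.pid --logfile /var/log/federation.log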
|
monitored_session_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import threading
import time
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.ready_for_local_init_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
variables.Variable(1, name='my_var')
variables.Variable(
2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
with self.test_session() as sess:
self.assertItemsEqual([b'my_var', b'my_local_var'],
sess.run(scaffold.ready_op))
self.assertItemsEqual([b'my_var'],
sess.run(scaffold.ready_for_local_init_op))
sess.run(scaffold.init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
sess.run(scaffold.local_init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_op)))
def test_defaults_no_variables(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
constant_op.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
def test_caches_values(self):
with ops.Graph().as_default():
variables.Variable([1])
scaffold1 = monitored_session.Scaffold()
scaffold1.finalize()
scaffold2 = monitored_session.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.ready_for_local_init_op,
scaffold2.ready_for_local_init_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with ops.Graph().as_default():
variables.Variable([1])
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
monitored_session.Scaffold().finalize()
def test_uses_passed_values(self):
with ops.Graph().as_default():
variables.Variable([1])
saver = saver_lib.Saver()
scaffold = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.ready_for_local_init_op)
self.assertEqual(7, scaffold.local_init_op)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with ops.Graph().as_default():
variables.Variable([1])
monitored_session.Scaffold().finalize()
with self.assertRaisesRegexp(RuntimeError,
'Graph is finalized and cannot be modified'):
constant_op.constant([0])
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = collections.Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def after_create_session(self, session): # pylint: disable=unused-argument
self.call_counter['after_create_session'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_summaries(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
        for _ in range(101):  # summaries are saved every 100 steps by default
session.run(new_gstep)
summaries = util_test.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_custom_saving(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
fake_hook = FakeHook()
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
chief_only_hooks=[fake_hook],
save_checkpoint_secs=0) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
        # Check that the custom hook was called.
self.assertEqual(1, fake_hook.call_counter['begin'])
# A restart will not find the checkpoint, since we didn't save.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
class WrappedSessionTest(test.TestCase):
"""_WrappedSession tests."""
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEquals(sess.graph, wrapped_sess.graph)
self.assertEquals(sess.sess_str, wrapped_sess.sess_str)
def test_should_stop_on_close(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_uses_check_stop(self):
with self.test_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_delegates_to_wrapped_session(self):
with self.test_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
def test_close_twice(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
"""_CoordinatedSession tests."""
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEquals(sess.graph, coord_sess.graph)
self.assertEquals(sess.sess_str, coord_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
def test_should_stop_on_close(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
def test_should_stop_on_coord_stop(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
def test_stop_threads_on_close_after_exception(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
"""A mock sessionthat aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
raise errors_impl.AbortedError('Aborted at N', None, None)
self._count -= 1
return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEquals(sess.graph, recoverable_sess.graph)
self.assertEquals(sess.sess_str, recoverable_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
def test_recovery(self):
with self.test_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = constant_op.constant(0)
v = array_ops.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the sessions_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
"""Tests of _HookedSession."""
def testRunPassesAllArguments(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
session_run_hook.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
session_run_hook.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['begin'], 0)
self.assertEqual(hook.call_counter['after_create_session'], 0)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
another_tensor = constant_op.constant([5], name='another_tensor')
third_tensor = constant_op.constant([10], name='third_tensor')
mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
sess.run(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
c_tensor = constant_op.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [10]})
sess.run(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
"""A hook that observes & optionally modifies RunOptions and RunMetadata."""
def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
debug_tensor_watch):
self._trace_level = trace_level
self._timeout_in_ms = timeout_in_ms
self._output_partition_graphs = output_partition_graphs
self._debug_tensor_watch = debug_tensor_watch
self.run_options_list = []
self.run_metadata_list = []
def before_run(self, run_context):
options = config_pb2.RunOptions(
trace_level=self._trace_level,
timeout_in_ms=self._timeout_in_ms,
output_partition_graphs=self._output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(
[self._debug_tensor_watch])
return session_run_hook.SessionRunArgs(None, None, options=options)
def after_run(self, run_context, run_values):
self.run_options_list.append(run_values.options)
self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
  # This set of tests verifies the supervised session behavior when exceptions
  # are raised next to the innermost session run() call.
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
      # Use a hook to save the model after every step (save_steps=1 below).
      # It also saves at the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold,
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
logdir))) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_on_aborted_error(self):
# Tests that we silently retry on abort. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, errors_impl.AbortedError(None, None, 'Abort'))
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restarts from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when StopIteration is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
    # pass through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
    # pass through returning from a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session closes cleanly when the with-body raises nothing.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
def test_keep_finalized_graph_as_finalized(self):
with ops.Graph().as_default() as g:
a_var = variables.Variable(0)
monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
watch_a = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
watch_b = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
with monitored_session.MonitoredSession(
hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[watch_a, watch_b]))
],
hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
hook_watch = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
caller_watch = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
caller_options.debug_options.debug_tensor_watch_opts.extend(
[caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
# trace_level=3 from the caller should override 2 from the hook.
# timeout_in_ms=60000 from the hook should override from the caller.
# output_partition_graph=True from the caller should override False
# from the hook.
# The two debug watches from the caller and the hook should be merged,
# in that order.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[caller_watch, hook_watch]))
],
hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
class SingularMonitoredSessionTest(test.TestCase):
"""Tests SingularMonitoredSession."""
def test_handles_initialization(self):
with ops.Graph().as_default():
a_var = variables.Variable(0)
with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, the following statement raises an error.
self.assertEqual(0, session.run(a_var))
def test_do_not_handle_aborted_error(self):
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
class _RaiseAbortedHook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
with self.assertRaises(errors_impl.AbortedError):
self.assertEqual(0, session.run(gstep))
with self.assertRaises(errors_impl.AbortedError):
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
self.assertEqual(0, session.run(gstep))
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.SingularMonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
    # pass through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.SingularMonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session closes cleanly when the with-body raises nothing.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.SingularMonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertEqual(None, session.raw_session())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.SingularMonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_raw_session(self):
with ops.Graph().as_default():
with monitored_session.SingularMonitoredSession() as session:
self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
test.main()
|
download_manager_test.py
|
# coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.download.download_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import json
import os
import re
import tempfile
import threading
import promise
import tensorflow as tf
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.download import download_manager as dm
from tensorflow_datasets.core.download import resource as resource_lib
ZIP = resource_lib.ExtractMethod.ZIP
TAR = resource_lib.ExtractMethod.TAR
NO_EXTRACT = resource_lib.ExtractMethod.NO_EXTRACT
def _get_promise_on_event(result=None, error=None):
"""Returns (event, Promise). Promise is fulfilled when `event.set()`."""
event = threading.Event()
def callback(resolve, reject):
def inside():
event.wait()
if error is not None:
reject(error)
resolve(result)
t = threading.Thread(target=inside)
t.daemon = True
t.start()
return event, promise.Promise(callback)
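# Illustrative usage sketch (mirrors the tests below); the hash value and URL
# are placeholders taken from the test data:
#   done, dl_promise = _get_promise_on_event(('sha_b', 10))
#   self.dl_results['https://a.ch/b'] = dl_promise
#   done.set()  # unblocks the helper thread so the promise resolves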
def _sha256(str_):
return hashlib.sha256(str_.encode('utf8')).hexdigest()
class DownloadManagerTest(tf.test.TestCase):
def _add_file(self, path, content='', mode='w'):
"""Returns open file handle."""
temp_f = tempfile.NamedTemporaryFile(mode=mode, delete=False)
self.files_content[path] = temp_f.name
temp_f.write(content)
temp_f.close()
self.existing_paths.append(path)
return temp_f
def setUp(self):
self.addCleanup(tf.compat.v1.test.mock.patch.stopall)
self.existing_paths = []
self.made_dirs = []
self.dl_results = {}
self.extract_results = {}
self.file_names = {} # resource fname -> original file name
def list_directory(path):
fname = os.path.basename(path).rsplit('.', 2)[0] # suffix is '.tmp.$uuid'
return [self.file_names.get(fname, 'file_with_no_ext')]
self.files_content = {}
def open_(path, mode='r'):
if 'w' in mode:
self._add_file(path)
return open(self.files_content[path], mode)
def rename(from_, to, overwrite=False):
del overwrite
if from_ in self.files_content:
self.existing_paths.append(to)
self.existing_paths.remove(from_)
self.files_content[to] = self.files_content.pop(from_)
self.gfile_patch = tf.compat.v1.test.mock.patch.object(
tf.io,
'gfile',
exists=lambda path: path in self.existing_paths,
makedirs=self.made_dirs.append,
# Used to get name of file as downloaded:
listdir=list_directory,
GFile=open_,
rename=tf.compat.v1.test.mock.Mock(side_effect=rename),
)
self.gfile = self.gfile_patch.start()
def tearDown(self):
self.gfile_patch.stop()
def _write_info(self, path, info):
content = json.dumps(info, sort_keys=True)
self._add_file(path, content)
def _get_manager(self, force_download=False, force_extraction=False,
checksums=None):
manager = dm.DownloadManager(
'my_dataset', '/dl_dir', '/extract_dir', '/manual_dir',
force_download=force_download, force_extraction=force_extraction,
checksums=checksums)
download = tf.compat.v1.test.mock.patch.object(
manager._downloader,
'download',
side_effect=lambda resource, tmpdir_path: self.dl_results[resource.url])
self.downloader_download = download.start()
extract = tf.compat.v1.test.mock.patch.object(
manager._extractor,
'extract',
side_effect=lambda resource, dest: self.extract_results[resource.path])
self.extractor_extract = extract.start()
return manager
def test_download(self):
"""One file in cache, one not."""
urls = {
'cached': resource_lib.Resource(url='http://a.ch/a'),
'new': resource_lib.Resource(url='https://a.ch/b'),
# INFO file of c has been deleted:
'info_deleted': resource_lib.Resource(url='https://a.ch/c'),
}
afname = resource_lib.Resource(url='http://a.ch/a').fname
bfname = resource_lib.Resource(url='https://a.ch/b').fname
cfname = resource_lib.Resource(url='https://a.ch/c').fname
_ = [self._add_file(path) for path in [
'/dl_dir/%s' % afname,
'/dl_dir/%s.INFO' % afname,
'/dl_dir/%s' % cfname,
]]
downloaded_b, self.dl_results['https://a.ch/b'] = _get_promise_on_event(
('sha_b', 10))
downloaded_c, self.dl_results['https://a.ch/c'] = _get_promise_on_event(
('sha_c', 10))
manager = self._get_manager()
downloaded_b.set()
downloaded_c.set()
downloads = manager.download(urls)
expected = {
'cached': '/dl_dir/%s' % afname,
'new': '/dl_dir/%s' % bfname,
'info_deleted': '/dl_dir/%s' % cfname,
}
self.assertEqual(downloads, expected)
def test_extract(self):
"""One file already extracted, one file with NO_EXTRACT, one to extract."""
resource_cached = resource_lib.Resource(path='/dl_dir/cached',
extract_method=ZIP)
resource_new = resource_lib.Resource(path='/dl_dir/new', extract_method=TAR)
resource_noextract = resource_lib.Resource(path='/dl_dir/noextract',
extract_method=NO_EXTRACT)
files = {
'cached': resource_cached,
'new': resource_new,
'noextract': resource_noextract,
}
self.existing_paths.append('/extract_dir/ZIP.%s' % resource_cached.fname)
extracted_new, self.extract_results['/dl_dir/%s' % resource_new.fname] = (
_get_promise_on_event('/extract_dir/TAR.new'))
manager = self._get_manager()
extracted_new.set()
res = manager.extract(files)
expected = {
'cached': '/extract_dir/ZIP.%s' % resource_cached.fname,
'new': '/extract_dir/TAR.%s' % resource_new.fname,
'noextract': '/dl_dir/%s' % resource_noextract.fname,
}
self.assertEqual(res, expected)
def test_extract_twice_parallel(self):
# Make sure calling extract twice on same resource actually does the
# extraction once.
extracted_new, self.extract_results['/dl_dir/foo.tar'] = (
_get_promise_on_event('/extract_dir/TAR.foo'))
manager = self._get_manager()
extracted_new.set()
out1 = manager.extract(['/dl_dir/foo.tar', '/dl_dir/foo.tar'])
out2 = manager.extract('/dl_dir/foo.tar')
expected = '/extract_dir/TAR.foo'
self.assertEqual(out1[0], expected)
self.assertEqual(out1[1], expected)
expected = '/extract_dir/TAR.foo'
self.assertEqual(out2, expected)
    # Results are memoized, so extract has only been called once.
self.assertEqual(1, self.extractor_extract.call_count)
def test_download_and_extract(self):
url_a = 'http://a/a.zip'
url_b = 'http://b/b'
sha_contenta = _sha256('content from a.zip')
sha_contentb = _sha256('content from b')
resource_a = resource_lib.Resource(url=url_a)
resource_a.sha256 = sha_contenta
resource_b = resource_lib.Resource(url=url_b)
resource_b.sha256 = sha_contentb
self.file_names[resource_a.fname] = 'a.zip'
dl_a, self.dl_results[url_a] = _get_promise_on_event((sha_contenta, 10))
dl_b, self.dl_results[url_b] = _get_promise_on_event((sha_contentb, 10))
ext_a, self.extract_results['/dl_dir/%s' % resource_a.fname] = (
_get_promise_on_event('/extract_dir/ZIP.%s' % resource_a.fname))
# url_b doesn't need any extraction.
for event in [dl_a, dl_b, ext_a]:
event.set()
manager = self._get_manager()
manager._checksums[url_a] = sha_contenta
manager._checksums[url_b] = sha_contentb
res = manager.download_and_extract({'a': url_a, 'b': url_b})
expected = {
'a': '/extract_dir/ZIP.%s' % resource_a.fname,
'b': '/dl_dir/%s' % resource_b.fname,
}
self.assertEqual(res, expected)
def test_download_and_extract_already_downloaded(self):
url_a = 'http://a/a.zip'
resource_a = resource_lib.Resource(url=url_a)
self.file_names[resource_a.fname] = 'a.zip'
# File was already downloaded:
self._add_file('/dl_dir/%s' % resource_a.fname)
self._write_info('/dl_dir/%s.INFO' % resource_a.fname,
{'original_fname': 'a.zip'})
ext_a, self.extract_results['/dl_dir/%s' % resource_a.fname] = (
_get_promise_on_event('/extract_dir/ZIP.%s' % resource_a.fname))
ext_a.set()
manager = self._get_manager()
res = manager.download_and_extract(url_a)
expected = '/extract_dir/ZIP.%s' % resource_a.fname
self.assertEqual(res, expected)
def test_force_download_and_extract(self):
url = 'http://a/b.tar.gz'
resource_ = resource_lib.Resource(url=url)
resource_.sha256 = _sha256('content of file')
# resource was already downloaded / extracted:
self.existing_paths = ['/dl_dir/%s' % resource_.fname,
'/extract_dir/TAR_GZ.%s' % resource_.fname]
self.file_names[resource_.fname] = 'b.tar.gz'
self._write_info('/dl_dir/%s.INFO' % resource_.fname,
{'original_fname': 'b.tar.gz'})
dl_a, self.dl_results[url] = _get_promise_on_event((resource_.sha256, 10))
ext_a, self.extract_results['/dl_dir/%s' % resource_.fname] = (
_get_promise_on_event('/extract_dir/TAR_GZ.%s' % resource_.fname))
dl_a.set()
ext_a.set()
manager = self._get_manager(force_download=True, force_extraction=True,
checksums={url: resource_.sha256})
res = manager.download_and_extract(url)
expected = '/extract_dir/TAR_GZ.%s' % resource_.fname
self.assertEqual(expected, res)
# Rename after download:
(from_, to), kwargs = self.gfile.rename.call_args
self.assertTrue(re.match(
r'/dl_dir/%s\.tmp\.[a-h0-9]{32}/b.tar.gz' % resource_.fname, from_))
self.assertEqual('/dl_dir/%s' % resource_.fname, to)
self.assertEqual(kwargs, {'overwrite': True})
self.assertEqual(1, self.downloader_download.call_count)
self.assertEqual(1, self.extractor_extract.call_count)
def test_wrong_checksum(self):
url = 'http://a/b.tar.gz'
sha_a = _sha256('content a')
sha_b = _sha256('content b')
dl_a, self.dl_results[url] = _get_promise_on_event((sha_a, 10))
dl_a.set()
manager = self._get_manager(checksums={url: sha_b})
with self.assertRaises(dm.NonMatchingChecksumError):
manager.download(url)
self.assertEqual(0, self.extractor_extract.call_count)
if __name__ == '__main__':
tf.test.main()
|
test_main_loop.py
|
import os
import signal
import time
from itertools import count
from multiprocessing import Process
from fuel.datasets import IterableDataset
from mock import MagicMock
from numpy.testing import assert_raises
from six.moves import cPickle
from blocks.main_loop import MainLoop
from blocks.extensions import TrainingExtension, FinishAfter, Printing
from blocks.utils import unpack
from blocks.config import config
from tests import MockAlgorithm, MockMainLoop
class WriteBatchExtension(TrainingExtension):
"""Writes data saved by MockAlgorithm to the log."""
def after_batch(self, _):
self.main_loop.log.current_row['batch'] = \
self.main_loop.algorithm.batch
def test_main_loop():
old_config_profile_value = config.profile
config.profile = True
main_loop = MainLoop(
MockAlgorithm(), IterableDataset(range(10)).get_example_stream(),
extensions=[WriteBatchExtension(), FinishAfter(after_n_epochs=2)])
main_loop.run()
assert_raises(AttributeError, getattr, main_loop, 'model')
assert main_loop.log.status['iterations_done'] == 20
assert main_loop.log.status['_epoch_ends'] == [10, 20]
assert len(main_loop.log) == 20
for i in range(20):
assert main_loop.log[i + 1]['batch'] == {'data': i % 10}
config.profile = old_config_profile_value
def test_training_resumption():
def do_test(with_serialization):
data_stream = IterableDataset(range(10)).get_example_stream()
main_loop = MainLoop(
MockAlgorithm(), data_stream,
extensions=[WriteBatchExtension(),
FinishAfter(after_n_batches=14)])
main_loop.run()
assert main_loop.log.status['iterations_done'] == 14
if with_serialization:
main_loop = cPickle.loads(cPickle.dumps(main_loop))
finish_after = unpack(
[ext for ext in main_loop.extensions
if isinstance(ext, FinishAfter)], singleton=True)
finish_after.add_condition(
["after_batch"],
predicate=lambda log: log.status['iterations_done'] == 27)
main_loop.run()
assert main_loop.log.status['iterations_done'] == 27
assert main_loop.log.status['epochs_done'] == 2
for i in range(27):
assert main_loop.log[i + 1]['batch'] == {"data": i % 10}
do_test(False)
do_test(True)
def test_training_interrupt():
def process_batch(batch):
time.sleep(0.1)
algorithm = MockAlgorithm()
algorithm.process_batch = process_batch
main_loop = MockMainLoop(
algorithm=algorithm,
data_stream=IterableDataset(count()).get_example_stream(),
extensions=[Printing()]
)
p = Process(target=main_loop.run)
p.start()
time.sleep(0.1)
os.kill(p.pid, signal.SIGINT)
time.sleep(0.1)
assert p.is_alive()
os.kill(p.pid, signal.SIGINT)
time.sleep(0.2)
assert not p.is_alive()
p.join()
def test_error():
ext = TrainingExtension()
ext.after_batch = MagicMock(side_effect=KeyError)
ext.on_error = MagicMock()
main_loop = MockMainLoop(extensions=[ext, FinishAfter(after_epoch=True)])
assert_raises(KeyError, main_loop.run)
ext.on_error.assert_called_once_with()
assert 'got_exception' in main_loop.log.current_row
ext.on_error = MagicMock(side_effect=AttributeError)
main_loop = MockMainLoop(extensions=[ext, FinishAfter(after_epoch=True)])
assert_raises(KeyError, main_loop.run)
ext.on_error.assert_called_once_with()
assert 'got_exception' in main_loop.log.current_row
|
handlers.py
|
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError:
threading = None
try:
import codecs
except ImportError:
codecs = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=0):
"""
Use the specified filename for streamed logging
"""
if codecs is None:
encoding = None
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d" % (self.baseFilename, i)
dfn = "%s.%d" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1"
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
self.mode = 'w'
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
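# Illustrative sketch (not part of the original module; filename and limits
# are placeholder values):
#   RotatingFileHandler("app.log", maxBytes=1000000, backupCount=5)
# keeps "app.log" plus "app.log.1" ... "app.log.5"; on each rollover the
# current file is renamed to "app.log.1" and older backups shift up by one,
# as described in __init__ above.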
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
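# Worked example (illustrative): with when='W3' (Thursday) and 'now' on a
# Saturday (day 5), daysToWait = 6 - 5 + 3 + 1 = 5; adding that to the
# coming midnight places the rollover at the midnight that ends next
# Thursday.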
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
newRolloverAt = newRolloverAt - 3600
else: # DST bows out before next rollover, so we need to add an hour
newRolloverAt = newRolloverAt + 3600
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
self.mode = 'w'
self.stream = self._open()
currentTime = int(time.time())
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstNow = time.localtime(currentTime)[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
newRolloverAt = newRolloverAt - 3600
else: # DST bows out before next rollover, so we need to add an hour
newRolloverAt = newRolloverAt + 3600
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
if not os.path.exists(self.baseFilename):
self.dev, self.ino = -1, -1
else:
stat = os.stat(self.baseFilename)
self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
if not os.path.exists(self.baseFilename):
stat = None
changed = 1
else:
stat = os.stat(self.baseFilename)
changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
if changed and self.stream is not None:
self.stream.flush()
self.stream.close()
self.stream = self._open()
if stat is None:
stat = os.stat(self.baseFilename)
self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
The attribute 'closeOnError' is set to 1 - which means that if
a socket error occurs, the socket is silently closed and then
reopened on the next logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = 0
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'):
s.settimeout(timeout)
s.connect((self.host, self.port))
return s
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = 1
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except socket.error:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
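# Worked example (illustrative): with the defaults above, the delay between
# reconnection attempts grows 1.0 -> 2.0 -> 4.0 -> 8.0 -> 16.0 seconds and
# is then capped at retryMax (30.0 seconds) for every later attempt.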
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else:
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error:
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
dummy = self.format(record) # just to get traceback text into record.exc_text
record.exc_info = None # to avoid Unpickleable error
s = pickle.dumps(record.__dict__, 1)
if ei:
record.exc_info = ei # for next handler
slen = struct.pack(">L", len(s))
return slen + s
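# Receiving-end sketch (illustrative, not part of this module): a reader
# would take the first 4 bytes, unpack them with struct.unpack(">L", ...)[0]
# to learn the payload length, read that many bytes, and then rebuild the
# record via logging.makeLogRecord(pickle.loads(payload)).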
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = 0
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=socket.SOCK_DGRAM):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, SysLogHandler(address="/dev/log") can be used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = 1
self._connect_unixsocket(address)
else:
self.unixsocket = 0
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.formatter = None
def _connect_unixsocket(self, address):
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# syslog may require either DGRAM or STREAM sockets
try:
self.socket.connect(address)
except socket.error:
self.socket.close()
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(address)
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
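# Worked example (illustrative): facility LOG_USER (1) with priority "info"
# (6) encodes to (1 << 3) | 6 == 14, so emit() prefixes the message with
# "<14>".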
def close(self):
"""
Closes the socket.
"""
if self.unixsocket:
self.socket.close()
logging.Handler.close(self)
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record)
if self.append_nul:
msg += '\000'
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
if codecs:
msg = codecs.BOM_UTF8 + msg
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, tuple):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
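# Illustrative sketch (host, addresses and credentials are placeholders,
# not values from this module):
#   SMTPHandler(mailhost=("smtp.example.com", 587),
#               fromaddr="app@example.com", toaddrs=["ops@example.com"],
#               subject="Application error",
#               credentials=("user", "secret"), secure=())
# Passing secure=() makes emit() call starttls() before logging in, as
# described in __init__ above.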
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Override this method in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
# Build the HTTP Basic auth header; b64encode returns bytes, so decode
# back to str before concatenating.
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders(data if self.method == "POST" else None)
h.getresponse() #can't do anything with the result
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.buffer = []
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.target = None
BufferingHandler.close(self)
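# Illustrative sketch (capacity and filename are placeholder values):
#   MemoryHandler(capacity=200, flushLevel=logging.ERROR,
#                 target=logging.FileHandler("debug.log"))
# buffers up to 200 records and forwards the whole buffer to debug.log as
# soon as a record at ERROR level or above arrives (or the buffer fills).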
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.queue.put_nowait(self._sentinel)
self._thread.join()
self._thread = None
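# Illustrative sketch (not part of the original module): minimal wiring of
# QueueHandler and QueueListener in a single process, using the queue module
# imported above; assumes the threading module was importable so that
# QueueListener is defined. A multiprocessing.Queue is used the same way
# when several processes log through one listener.
if __name__ == '__main__':
    log_queue = queue.Queue(-1)                       # unbounded in-process queue
    console = logging.StreamHandler()                 # final destination handler
    console.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    listener = QueueListener(log_queue, console)      # drains the queue on a thread
    listener.start()
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(QueueHandler(log_queue))          # producers only touch the queue
    root.info('routed through the queue')
    listener.stop()                                   # flush remaining records and join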
|
timer.py
|
import time
import threading
import ipywidgets as ipw
class Timer():
"""Class for scheduling periodic callbacks.
Calls a user-supplied callback function at a fixed interval on a
background thread. Useful for passing data from DMA transfers back to a
visualisation function.
"""
def __init__(self, callback, t):
"""Create new dma-based data timer.
callback: function to call with data chunk
t: time between each function call
"""
self.callback = callback
self.t = t
self.stopping = True
def _do(self):
"""Generate new data and restart timer thread.
Should never be run directly. use `start()` instead.
"""
while not self.stopping:
next_timer = time.time() + self.t
self.callback()
sleep_time = next_timer - time.time()
if sleep_time > 0:
time.sleep(sleep_time)
def start(self):
"""Start the data generator thread."""
if self.stopping:
self.stopping = False
thread = threading.Thread(target=self._do)
thread.start()
def stop(self):
"""Stop a running data generator thread.
Does not need a lock, since the spawned timer thread will only read `self.stopping`.
"""
self.stopping = True
def get_widget(self):
"""Get ipywidget controls to stop and start the generator thread."""
button_layout = ipw.Layout(margin='auto')
start_button = ipw.Button(description=u'\u25B6', layout=button_layout)
start_button.on_click(lambda _: self.start())
stop_button = ipw.Button(description=u'\u25A0', layout=button_layout)
stop_button.on_click(lambda _: self.stop())
return ipw.HBox([start_button, stop_button])
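# Illustrative sketch (not part of the original module): drive a simple
# callback roughly every 0.5 seconds; the callback and interval below are
# arbitrary example values.
if __name__ == '__main__':
    def tick():
        print('tick at', time.time())

    timer = Timer(tick, 0.5)
    timer.start()
    time.sleep(2)   # let the callback fire a few times
    timer.stop()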
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import sys
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER, no_fastcomp, no_wasm_backend, create_test_file, parameterized
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
if sys.version_info.major == 2:
from urllib import urlopen
else:
from urllib.request import urlopen
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test verifies behavior that will be deprecated at some point in the future; remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
@no_wasm_backend('wasm source maps')
def test_emscripten_log(self):
# TODO: wasm support for source maps. emscripten_loadSourceMap looks at $HTML.map but it should be $NAME.wasm.map.
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
self.compile_btest([src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
# On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
if WINDOWS and Building.which('mingw32-make'):
run_process(['doit.bat'])
else:
run_process(['sh', './doit.sh'])
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
os.makedirs('assets/sub/asset1/'.replace('\\', '/'))
os.makedirs('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir('dirrey')
except OSError:
pass
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
create_test_file('src.cpp', self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
create_test_file('file.txt', '''Hello!''')
self.compile_btest(['src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that output files whose names contain single or double quotes are handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except OSError:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
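# Note: --use-preload-cache below makes the generated loader store the downloaded .data
# package in IndexedDB and reuse it on subsequent page loads (which is what the second
# run_browser checks via preloadResults/fromCache), while --indexedDB-name overrides the
# database name used for that cache.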
run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
os.makedirs(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs('subdirr')
os.makedirs('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
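# Note: Module.locateFile(path, prefix) is the runtime hook for resolving the URLs of
# secondary files such as the .data package and the .wasm binary; the replacement above
# redirects only the .data lookup into "cdn/" and leaves the .wasm lookup unchanged.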
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file makes xhr.onload run with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test that an unknown protocol goes through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.compile_btest([
'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy (so we do proxy), but make the worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
if self.is_wasm_backend():
return ['-s', 'ASYNCIFY']
else:
return ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
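# Note: on the wasm (upstream) backend, synchronous pauses are implemented with Asyncify
# (-s ASYNCIFY); on the old fastcomp backend the same tests needed the Emterpreter
# (-s EMTERPRETIFY=1 -s EMTERPRETIFY_ASYNC=1), hence the is_wasm_backend() switch above.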
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1'] + self.get_async_args()
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
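# Note: the four '%s' slots above are filled with 'setTimeout(function() {' / '}, 1);'
# when delay is set, so the same pre.js exercises both immediate and next-tick dispatch
# of the keydown/keyup events.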
create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
self.compile_btest(['sdl_key.c', '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate sending the keypress event only when the prior
// keydown event was not preventDefault()ed.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. The keypress event is sent because the prior keydown
// event is not preventDefault()ed.
sendKey(65);
// Send backspace. The keypress should not be sent, because default handling
// of the keydown event is prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
self.compile_btest(['sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
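# Note: this spoof follows the older Working Draft shape in which buttons are plain 0/1
# numbers; test_sdl_joystick_2 below uses the newer shape where each button is an object
# with 'pressed' and 'value' fields.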
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
self.compile_btest(['test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check which of the attributes we want to test are supported by the WebGL implementation
# (request the attribute, create a context, and check its value afterwards in the context attributes).
# The tests also succeed when an attribute is not supported by the implementation.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest('write_file.cpp', '0', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
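# Note: FS.syncfs(true, cb) above populates the in-memory FS from the persistent IDBFS
# backing store; wrapping it in addRunDependency('syncfs')/removeRunDependency keeps the
# runtime from calling main() until the sync has completed.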
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
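# Note: WORKERFS is a read-only, worker-only filesystem (hence --proxy-to-worker below);
# 'blobs' entries are {name, data} pairs and 'files' entries are File objects whose own
# names are used, so the program sees /work/blob.txt and /work/file.txt.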
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
if not os.path.exists('sub'):
os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
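# Note (naming from memory, so treat as approximate): --separate-metadata makes the file
# packager emit the package metadata next to the JS loader (files.js plus a
# files.js.metadata file) instead of embedding it, and test_workerfs_package.cpp is then
# expected to mount the package from inside the worker via workerfs.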
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
self.clear()
os.mkdir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 makes emcc pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. The client receives compressed data and can use it directly. This is the typical usage.
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
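# Note: no --js-output is passed here, so the file packager writes the loader JS to stdout,
# which is captured above and saved as files.js; with --lz4 the payload in files.data stays
# LZ4-compressed and the runtime (-s LZ4=1) reads it in compressed form.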
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
self.compile_btest(['sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
self.compile_btest(['-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
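# Note (assumption): -s OFFSCREEN_FRAMEBUFFER=1 is what allows the GL/EGL context to be
# created from the proxied pthread here, by rendering into an offscreen framebuffer that
# is then presented on the main canvas instead of requiring OffscreenCanvas support.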
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
self.compile_btest(['-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def do_test_worker(self, args=[]):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20:' + ('data%20for%20w' if file_data else '') + ':')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value (adler32 can return a signed int on Python 2)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception as e:
print('(sleep for server)')
time.sleep(1)
if i == 59:
raise e
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file
# locks and the test tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self):
def test(args):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + args)
# test normally
test([])
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
test(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args)
@requires_graphics_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
# NOTE: Should FULL_ES3=1 imply client-side vertex arrays? The emulation needs FULL_ES2=1 for now.
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'USE_WEBGL2=1', '-s', 'FULL_ES2=1', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
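    # pack file1.txt and file2.txt into test.data; the file packager prints the loader JS to stdout, which we capture as script2.js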
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
self.compile_btest([path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
for wasm in [0, 1]:
if not wasm and self.is_wasm_backend():
continue
print(wasm)
main, supp = self.setup_runtimelink_test()
create_test_file('supp.cpp', supp)
self.compile_btest(['supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
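        // addRunDependency() holds back run()/main() until the matching removeRunDependency() call below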
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
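    # run both without (0) and with (1) an external memory init file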
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
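        // When Module.memoryInitializerRequest is set, the runtime uses this already-sent XHR's response as the
        // memory init file instead of issuing its own request.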
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
      ok = true; // should fail and not reach here; the runtime is not ready yet, so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
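        // Module.noted holds a pointer; >>2 converts the byte address into a HEAP32 index so the stored int can be read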
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
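      # each case is built both as asm.js (WASM=0, skipped on the wasm backend) and as wasm (WASM=1)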
for mode in [['-s', 'WASM=0'], ['-s', 'WASM=1']]:
if 'WASM=0' in mode and self.is_wasm_backend():
continue
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
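    # the test dlopen()s "/library.so", so give the side module that name before it gets preloaded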
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
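    # test both the default behavior (preloaded data copied into the heap) and --no-heap-copy, which keeps it in a separate buffer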
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
self.compile_btest([path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
outdir = os.getcwd()
if not has_browser():
self.skipTest('need a browser')
    # We cannot run emrun from the temp directory that the suite cleans up afterwards: the launched browser uses that
    # directory as its startup directory and does not close as part of the test, which would pin down the cwd on Windows
    # and make it impossible to delete. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--no_private_browsing', '--port', '6941']
]:
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
proc = run_process(args, check=False)
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert proc.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.compile_btest(['-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@requires_threads
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
[],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
  # Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work together
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'USE_WEBGL2=1', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
@no_wasm_backend('asm.js-specific')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
# in the emterpreter, check the special assertions mode as well
if not self.is_wasm_backend():
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASSERTIONS=1'] + self.get_async_args())
def test_locate_file(self):
for wasm in ([0, 1] if not self.is_wasm_backend() else [1]):
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION=1'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@no_wasm_backend('asm.js')
def test_asm_swapping(self):
self.clear()
create_test_file('run.js', r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
create_test_file('second.cpp', self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
self.compile_btest(['second.cpp'] + opts)
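      # distill the asm.js module out of the freshly built a.out.js into second.js, in a form that run.js can swap in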
run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
self.assertExists('second.js')
if SPIDERMONKEY_ENGINE in JS_ENGINES:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl2_image.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
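    # run with and without a memory init file, preloading the image both at the FS root and remapped under /assets via the src@dst syntax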
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl2_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.compile_btest([
'sdl2_image_jpeg.c', '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
for defines in [[]]:
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
self.compile_btest(['sdl2_key.c', '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
self.compile_btest(['sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
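    # the custom page below offsets the canvas inside a positioned container, so SDL's translation of CSS offsets to canvas coordinates is exercised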
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'TOTAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
self.compile_btest(['sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
create_test_file('test.c', self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), 'sound.ogg')
self.btest('sdl2_mixer.c', expected='1', args=['--preload-file', 'sound.ogg', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@no_fastcomp('emterpretify is not compatible with threads')
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-g'])
def test_async_2(self):
    # Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
    # than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
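  # A sketch of how the @parameterized decorator above expands (assuming the usual
  # behavior of the test runner's parameterized helper): one method is generated per key,
  # roughly
  #
  #   def test_async_longjmp_O0(self): self.test_async_longjmp([])
  #   def test_async_longjmp_O3(self): self.test_async_longjmp(['-O3'])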
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad_2(self):
for opts in [0, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions, '-g'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@no_wasm_backend('emterpretify - specific behavior wrt other async calls being paused or not')
def test_emterpreter_async_with_manual(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'])
@no_wasm_backend('emterpretify - yielding behavior')
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
@no_wasm_backend('emterpretify - safe-heap specific issues')
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@no_wasm_backend('emterpretify - yield-specific')
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
@no_fastcomp('emterpretify never worked here')
def test_async_returnvalue(self, args):
if '@' in str(args):
create_test_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS=1'])
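  # The 'response' variant above exercises the '@file' form of -s list options: the value
  # is read from a JSON file rather than given inline. A minimal sketch (hypothetical
  # file name):
  #
  #   create_test_file('imports.json', '["sync_tunnel"]')
  #   args = ['-s', 'ASYNCIFY_IMPORTS=@imports.json']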
@no_fastcomp('wasm backend asyncify specific')
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
@requires_sync_compilation
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
        HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
@no_wasm_backend('cannot customize TOTAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
    # a non-default amount of memory that will be allocated for the Emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed_wasm(self):
self._test_dylink_dso_needed(1, 0)
def test_dylink_dso_needed_wasm_inworker(self):
self._test_dylink_dso_needed(1, 1)
def test_dylink_dso_needed_asmjs(self):
self._test_dylink_dso_needed(0, 0)
def test_dylink_dso_needed_asmjs_inworker(self):
self._test_dylink_dso_needed(0, 1)
@no_wasm_backend('https://github.com/emscripten-core/emscripten/issues/8753')
@requires_sync_compilation
def _test_dylink_dso_needed(self, wasm, inworker):
# here we reuse runner._test_dylink_dso_needed, but the code is run via browser.
print('\n# wasm=%d inworker=%d' % (wasm, inworker))
self.set_setting('WASM', wasm)
self.emcc_args += ['-O2']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
      # set up by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
src += r'''
int main() {
_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
super(browser, self)._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
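  # prep_no_SAB() writes a shell page that nulls out SharedArrayBuffer and Atomics, which
  # forces the single-threaded fallback paths. A sketch of how a test could use it
  # (hypothetical, not one of the tests below):
  #
  #   self.prep_no_SAB()
  #   self.btest('browser_test_hello_world.c', expected='0',
  #              args=['-s', 'USE_PTHREADS=1', '--shell-file', 'html.html'])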
# Test that the emscripten_ atomics api functions work.
@requires_threads
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
args = opt + debug + f32
print(args)
if self.is_wasm_backend() and '--separate-asm' in args or 'AGGRESSIVE_VARIABLE_ELIMINATION=1' in args:
continue
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # Tests the remaining GCC atomics after the two tests above.
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args)
test([])
test(['-O3'])
test(['-s', 'MODULARIZE_INSTANCE=1'])
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
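  # For context: PROXY_TO_PTHREAD runs the application's main() on a dedicated pthread
  # rather than on the browser's main thread, so it may block while the main thread stays
  # free to service proxied operations.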
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-std=c++11', '-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
  @no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Stress test pthreads allocating memory that will call sbrk(), where the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
@requires_threads
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_HINT_NUM_CORES=2'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test that --separate-asm works with -s USE_PTHREADS=1.
@no_wasm_backend('asm.js')
@requires_threads
def test_pthread_separate_asm_pthreads(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'] + modularize)
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs('cdn')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
if self.is_wasm_backend():
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
  # Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'TOTAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME=1'], also_asmjs=True)
# Test that STACK_BASE and STACK_MAX correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS', '-std=c++11'])
@no_fastcomp('-s SAFE_STACK is only supported on WASM backend')
@requires_threads
def test_pthread_safe_stack(self):
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'SAFE_STACK', '-s', 'DEFAULT_PTHREAD_STACK_SIZE=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('LSan is only supported on WASM backend')
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('ASan is only supported on WASM backend')
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@no_fastcomp('ASan is only supported on WASM backend')
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
  # Test that it is possible to send a signal by calling alarm(timeout), which in turn calls the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
@no_wasm_backend('mem init file')
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
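  # To illustrate the generated data: each entry encodes 512 bytes as 256 two-byte escape
  # pairs, so for i == 0 the first entry begins "\x00\x00\x00\x01\x00\x02...", and
  # test_meminit_big below repeats the 256-entry list 256 times to produce a memory
  # initializer of more than 32MB.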
@no_wasm_backend('mem init file')
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
@no_wasm_backend('asm.js')
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
create_test_file('one.html', '<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
create_test_file('two.html', '''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('tests.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
self.assertExists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
@no_wasm_backend('emterpretify - bytecode in a file')
def test_emterpretify_file(self):
create_test_file('shell.html', '''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
self.compile_btest(['src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
self.clear()
os.makedirs('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
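  # The 'src@dst' form of --embed-file used above maps a host path into the virtual
  # filesystem, so the corpus files are visible to the compiled code as /utf8_corpus.txt
  # and /utf16_corpus.txt regardless of where they live on disk. Sketch:
  #
  #   '--embed-file', path_from_root('tests', 'utf16_corpus.txt') + '@/utf16_corpus.txt'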
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
@no_fastcomp('not optimized in fastcomp')
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5651), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
    self.skipTest('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'USE_WEBGL2=0', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'USE_WEBGL2=0'],
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'USE_WEBGL2=1'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via the automatic transfer of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
if asyncify:
if not self.is_wasm_backend():
continue
# given the synchronous render loop here, asyncify is needed to see intermediate frames and the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
# Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
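# (Roughly, such a shell creates an ArrayBuffer of the desired size and assigns it to Module.buffer before the
# compiled script runs; see tests/test_preallocated_heap_shell.html for the actual mechanism used here.)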
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
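# s is 8 bytes doubled 14 times = 8 * 2**14 = 131072 bytes (128KB); writing it 1024 times yields a 128MB file.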
self.btest('fetch/stream_file.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append mode is implicitly assumed.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@no_wasm_backend("fetch API uses an asm.js based web worker to run synchronous XHRs and IDB operations")
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3', '--separate-asm'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '--separate-asm', '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9062')
@requires_threads
def test_pthread_growth_mainthread(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'MODULARIZE_INSTANCE=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9065')
@requires_threads
def test_pthread_growth(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = 'src.c'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
self.compile_btest(['src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1']
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = ['src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation, letting us affect heap copying
# or lack thereof
for file_packager_args in [[], ['--no-heap-copy']]:
print(file_packager_args)
run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
# Tests that Emscripten-compiled applications can be run from a relative path in a browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.compile_btest(['test.c', '-o', 'test.html', '-O3'])
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js', '-O3'] + args)
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# here we also eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
if not os.path.exists(filesystem_path):
os.makedirs(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_modularize_Module_input(self):
self.btest(path_from_root('tests', 'browser', 'modularize_Module_input.cpp'), '0', args=['--shell-file', path_from_root('tests', 'browser', 'modularize_Module_input.html'), '-s', 'MODULARIZE_INSTANCE=1'])
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-std=c++11', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that it is possible to load two asm.js compiled programs onto one page when both --separate-asm and MODULARIZE=1 are used, by assigning
# the pages different asm module names to ensure they do not conflict when being XHRed in.
@no_wasm_backend('this tests asm.js support')
def test_two_separate_asm_files_on_same_page(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'two_separate_asm_files.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page1.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module1', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage1["asm"]']
print(cmd)
subprocess.check_call(cmd)
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page2.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module2', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage2["asm"]']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
# Tests that it is possible to encapsulate asm.js compiled programs by using --separate-asm + MODULARIZE=1. See
# encapsulated_asmjs_page_load.html for the example.
@no_wasm_backend('this tests asm.js support')
def test_encapsulated_asmjs_page_load(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'encapsulated_asmjs_page_load.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'a.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=EmscriptenCode', '-s', 'SEPARATE_ASM_MODULE_NAME="var EmscriptenCode"']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
@requires_threads
@no_fastcomp('offset converter is not supported on fastcomp')
def test_offset_converter(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
|
mesh_pool.py
|
import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
from models.layers.mesh_prepare import get_edge_faces
import math
class MeshPool(nn.Module):
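# Pools a batch of meshes by iteratively collapsing edges until each mesh has __out_target edges left.
# Candidate edges are ranked by a quadric error metric (QEM) priority queue built in build_QEM().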
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
QEM, New_vertice = self.build_QEM(mesh, mesh.edges_count)
queue = self.__build_queue1(QEM, mesh.edges_count)
last_count = mesh.edges_count + 1
mask = np.ones(mesh.edges_count, dtype=bool)
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
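# Greedily collapse the lowest-error edge from the heap until the target edge count is reached;
# edges that have already been masked out by a previous collapse are skipped.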
while mesh.edges_count > self.__out_target:
value, edge_id = heappop(queue)
edge_id = int(edge_id)
newvertice = New_vertice[edge_id]
if newvertice == []:
continue
# print('len',edge_id, len(newvertice),file=data)
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups, newvertice)
# print('after', mesh.edges_count)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
#fe=extract_features1(mask,mesh)
#fe1 = edge_groups.rebuild_features(fe, mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups, new_vertex):
if self.has_boundaries(mesh, edge_id):
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0) \
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)
self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)
mesh.merge_vertices(edge_id, new_vertex)
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
# First, check whether gemm changes along with faces.
# Second, check whether gemm changes during edge collapse, and whether it changes the way we expect.
# Third, figure out why the original program does not have this problem; print the data to verify.
# Fourth, as a workaround, if none of the above helps, build_gemm can be used to rebuild the edge, face and related arrays.
@staticmethod
def __pool_face(mesh):
np.set_printoptions(threshold=10000000000000000)
faces = set()
faces2=[]
gemm = np.array(mesh.gemm_edges)
for edge_index in range(len(gemm)):
gem = gemm[edge_index]
for i in range(2):
otheredge = [gem[i * 2], gem[i * 2 + 1]]
face = set()
face.add(mesh.edges[otheredge[0]][0])
face.add(mesh.edges[otheredge[0]][1])
face.add(mesh.edges[otheredge[1]][0])
face.add(mesh.edges[otheredge[1]][1])
face=list(face)
face.sort()
face_normals = np.cross(mesh.vs[face[1]] - mesh.vs[face[0]],
mesh.vs[face[2]] - mesh.vs[face[1]])
face_areas = np.sqrt((face_normals ** 2).sum())
if face_areas==0.0:
print(face)
faces.add(tuple(face))
#print('faces',faces)
# if face in faces:
# continue
# else:
# faces.append(face)
#edge_count, edge_faces, edges_dict = get_edge_faces(faces)
#edge_count, edges, edges_dict=MeshPool.get_edge(faces)
# print(abcbdbdb)
#mesh.edges = edges
faces = list(faces)
return faces#, edge_count, edges_dict
@staticmethod
def __get_edge(mesh):
edge_count = 0
edge2keys = dict()
for i in range(len(mesh.edges)):
cur_edge=tuple(sorted((mesh.edges[i][0],mesh.edges[i][1])))
# if cur_edge not in edge2keys:
edge2keys[cur_edge] = edge_count
edge_count += 1
return edge_count, edge2keys
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
return False
invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1],
mesh.sides[key_b, other_side_b + 1])
MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
mask[key_b] = False
MeshPool.__remove_group(mesh, edge_groups, key_b)
mesh.remove_edge(key_b)
mesh.edges_count -= 1
return key_a
@staticmethod
def __get_invalids(mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
if len(shared_items) == 0:
return []
else:
assert (len(shared_items) == 2)
middle_edge = other_keys_a[shared_items[0]]
update_key_a = other_keys_a[1 - shared_items[0]]
update_key_b = other_keys_b[1 - shared_items[1]]
update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]
update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]
MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b,
MeshPool.__get_other_side(update_side_b))
MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b)
return [key_a, key_b, middle_edge]
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
mesh.sides[edge_a_key, side_a] = side_b
mesh.sides[edge_b_key, side_b] = side_a
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
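# Maps a side index to its partner within the same pair: 0 <-> 1 and 2 <-> 3.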
return side + 1 - 2 * (side % 2)
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
@staticmethod
def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
vertex = set(mesh.edges[invalid_edges[0]])
for edge_key in invalid_edges:
vertex &= set(mesh.edges[edge_key])
mask[edge_key] = False
MeshPool.__remove_group(mesh, edge_groups, edge_key)
mesh.edges_count -= 3
vertex = list(vertex)
assert (len(vertex) == 1)
mesh.remove_vertex(vertex[0])
def __build_queue(self, features, edges_count):
# delete edges with smallest norm
squared_magnitude = torch.sum(features * features, 0)
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1)
edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
def __build_queue1(self, QEM, edges_count):
QEMM = torch.tensor(QEM, device=self.__fe.device, dtype=torch.float32).unsqueeze(-1)
edge_ids = torch.arange(edges_count, device=self.__fe.device, dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((QEMM, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
def build_QEM(self, mesh, edges_count):  # consistent with update_mesh
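# Standard quadric error metric setup: each face contributes the plane quadric p p^T (p = (a, b, c, d) of the
# face plane), stored as 10 upper-triangular coefficients per vertex in Q. The cost of collapsing an edge is the
# quadric form of Q(v1) + Q(v2) evaluated at the best contraction point found by calculate_error().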
QEM = [0 for i in range(edges_count)]
Newvertice = [[0,0,0] for i in range(edges_count)]
Q = [[0 for i in range(10)] for j in range(len(mesh.vs))]
faces = MeshPool.__pool_face(mesh)
edge_count, edges_dict= MeshPool.__get_edge(mesh)
np.set_printoptions(threshold=10000000000000000)
for i in range(len(faces)):
p = []
face = faces[i]
#print('face',face)
# print(face)
for j in range(3):
p.append(mesh.vs[face[j]])
new1 = MeshPool.cross(p[1] - p[0], p[2] - p[0])
if (new1[0] == 0):
continue
new = MeshPool.normarlize(new1)
if (math.isnan(new[0])):
continue
q1 = MeshPool.getm(new[0], new[1], new[2], -(new[0] * p[0][0] + new[1] * p[0][1] + new[2] * p[0][2]))
for j in range(3):
for k in range(10):
if math.isnan(q1[k]):
print('q1 has a NaN problem')
Q[face[j]][k] = Q[face[j]][k] + q1[k]
for f in faces:
p_result = [0, 0, 0]
for i in range(3):
err, new_vertex = MeshPool.calculate_error(mesh, Q, f[i], f[(i + 1) % 3], p_result)
edge = tuple(sorted((f[i], f[(i + 1) % 3])))
if edge in edges_dict:  # only score edges that actually exist in the mesh
edge_id = edges_dict[edge]
QEM[edge_id] = err
if Newvertice[edge_id][0]==0.0:
Newvertice[edge_id][0] = new_vertex[0]
Newvertice[edge_id][1] = new_vertex[1]
Newvertice[edge_id][2] = new_vertex[2]
else:
continue
return QEM, Newvertice
@staticmethod
def __vertex_error(q, x, y, z):
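# Evaluates the quadric form v^T Q v for the homogeneous point v = (x, y, z, 1), with q holding the
# 10 packed upper-triangular coefficients of the symmetric 4x4 quadric matrix Q.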
error = q[0] * x * x + 2 * q[1] * x * y + 2 * q[2] * x * z + 2 * q[3] * x + q[4] * y * y \
+ 2 * q[5] * y * z + 2 * q[6] * y + q[7] * z * z + 2 * q[8] * z + q[9]
return error
@staticmethod
def calculate_error(mesh, Q, id_v1, id_v2, p_result):
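# Sums the quadrics of the two endpoints; if the 3x3 system is invertible (d != 0) the optimal contraction
# point is solved for via cofactors, otherwise the cheapest of v1, v2 and their midpoint is used.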
q = [0 for i in range(10)]
for i in range(10):
q[i] = Q[id_v1][i] + Q[id_v2][i]
d = MeshPool.det(q, 0, 1, 2, 1, 4, 5, 2, 5, 7)
if d != 0:
p_result[0] = -1 / d * (MeshPool.det(q, 1, 2, 3, 4, 5, 6, 5, 7, 8))
p_result[1] = 1 / d * (MeshPool.det(q, 0, 2, 3, 1, 5, 6, 2, 7, 8))
p_result[2] = -1 / d * (MeshPool.det(q, 0, 1, 3, 1, 4, 6, 2, 5, 8))
error = MeshPool.__vertex_error(q, p_result[0], p_result[1], p_result[2])
else:
p1 = mesh.vs[id_v1]
p2 = mesh.vs[id_v2]
p3 = [0, 0, 0]
p3[0] = (p1[0] + p2[0]) / 2
p3[1] = (p1[1] + p2[1]) / 2
p3[2] = (p1[2] + p2[2]) / 2
error1 = MeshPool.__vertex_error(q, p1[0], p1[1], p1[2])
error2 = MeshPool.__vertex_error(q, p2[0], p2[1], p2[2])
error3 = MeshPool.__vertex_error(q, p3[0], p3[1], p3[2])
error = min(error1, min(error2, error3))
if error == error1:
p_result[0] = p1[0]
p_result[1] = p1[1]
p_result[2] = p1[2]
if error == error2:
p_result[0] = p2[0]
p_result[1] = p2[1]
p_result[2] = p2[2]
if error == error3:
p_result[0] = p3[0]
p_result[1] = p3[1]
p_result[2] = p3[2]
return error, p_result
@staticmethod
def min(a, b):
if (a < b):
return a
else:
return b
@staticmethod
def cross(a, b):
x = a[1] * b[2] - a[2] * b[1]
y = a[2] * b[0] - a[0] * b[2]
z = a[0] * b[1] - a[1] * b[0]
new = []
new.append(x)
new.append(y)
new.append(z)
return new
@staticmethod
def normarlize(a):
square = (a[0] * a[0] + a[1] * a[1] + a[2] * a[2]) ** 0.5
a[0] = a[0] / square
a[1] = a[1] / square
a[2] = a[2] / square
return a
@staticmethod
def getm(*args):
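# Builds the 10 packed quadric coefficients. With 4 args (a, b, c, d) of a plane it forms the outer-product
# terms a*a, a*b, a*c, a*d, b*b, b*c, b*d, c*c, c*d, d*d; with 10 args it just copies them.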
m = [0 for i in range(10)]
if len(args) == 10:
for i in range(10):
m[i] = args[i]
elif len(args) == 4:
m[0] = (args[0] * args[0])
m[1] = (args[0] * args[1])
m[2] = (args[0] * args[2])
m[3] = (args[0] * args[3])
m[4] = (args[1] * args[1])
m[5] = (args[1] * args[2])
m[6] = (args[1] * args[3])
m[7] = (args[2] * args[2])
m[8] = (args[2] * args[3])
m[9] = (args[3] * args[3])
if math.isnan(m[0]):
print('original data', args)
return m
@staticmethod
def det(*args):
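# Determinant of the 3x3 matrix whose entries are read from the packed quadric m at the nine given indices.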
m = args[0]
a11 = args[1]
a12 = args[2]
a13 = args[3]
a21 = args[4]
a22 = args[5]
a23 = args[6]
a31 = args[7]
a32 = args[8]
a33 = args[9]
det = m[a11] * m[a22] * m[a33] + m[a13] * m[a21] * m[a32] \
+ m[a12] * m[a23] * m[a31] - m[a13] * m[a22] * m[a31] \
- m[a11] * m[a23] * m[a32] - m[a12] * m[a21] * m[a33]
return det
@staticmethod
def operationadd(m, n):
return MeshPool.getm(m[0] + n[0], m[1] + n[1], m[2] + n[2],
m[3] + n[3], m[4] + n[4], m[5] + n[5], m[6] + n[6], m[7] + n[7], m[8] + n[8],
m[9] + n[9])
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target)
mesh.union_groups(source, target)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
|
test_callbacks.py
|
import os
import sys
import multiprocessing
import numpy as np
import pytest
from keras import optimizers
np.random.seed(1337)
from keras import callbacks
from keras.models import Sequential
from keras.layers.core import Dense
from keras.utils.test_utils import get_test_data
from keras import backend as K
from keras.utils import np_utils
input_dim = 2
nb_hidden = 4
nb_class = 2
batch_size = 5
train_samples = 20
test_samples = 20
def test_ModelCheckpoint():
filepath = 'checkpoint.h5'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
def test_EarlyStopping():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
mode = 'max'
monitor = 'val_acc'
patience = 0
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=20)
mode = 'auto'
monitor = 'val_acc'
patience = 2
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=20)
def test_EarlyStopping_reuse():
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = Sequential((
Dense(1, input_dim=1, activation='relu'),
Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper])
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper])
assert len(hist.epoch) >= patience
def test_LearningRateScheduler():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
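# After 5 epochs the scheduler has last set lr = 1. / (1. + 4) = 0.2.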
assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
def test_ReduceLROnPlateau():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
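# With epsilon=10 no epoch counts as an improvement, so after patience=1 epoch the LR is cut once
# (0.1 * factor 0.1 = 0.01); cooldown=5 then prevents any further reduction within the remaining epochs.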
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
model = make_model()
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason="Requires tensorflow backend")
def test_TensorBoard():
import shutil
filepath = './logs'
(X_train, y_train), (X_test, y_test) = get_test_data(
nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
def data_generator_graph(train):
while 1:
if train:
yield {'X_vars': X_train, 'output': y_train}
else:
yield {'X_vars': X_test, 'output': y_test}
# case 1 Sequential
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
cbks = [tsb]
# fit with validation data
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=3)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
# fit generator with validation data
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
validation_data=(X_test, y_test),
callbacks=cbks)
# fit generator without validation data
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
callbacks=cbks)
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
validation_data=(X_test, y_test),
callbacks=cbks)
# fit generator without validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
callbacks=cbks)
assert os.path.exists(filepath)
shutil.rmtree(filepath)
def test_LambdaCallback():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model training and be terminated after training has completed.
def f():
while True:
pass
p = multiprocessing.Process(target=f)
p.start()
cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
p.join()
assert not p.is_alive()
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason="Requires tensorflow backend")
def test_TensorBoard_with_ReduceLROnPlateau():
import shutil
filepath = './logs'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=4,
verbose=1),
callbacks.TensorBoard(
log_dir=filepath)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
assert os.path.exists(filepath)
shutil.rmtree(filepath)
if __name__ == '__main__':
pytest.main([__file__])
|
voiceassistant.py
|
# Text-to-Speech
from gtts import gTTS
from io import BytesIO
from pydub import AudioSegment
from pydub.playback import play
# Speech recognition
import speech_recognition as sr
# Hotword detection
import snowboy.snowboydecoder as snowboydecoder
from definitions import SNOWBOY_MODEL_PATH
import threading
# Fuzzy logic
from fuzzywuzzy import fuzz
# DBus
from pydbus import SessionBus
from gi.repository import GLib
# Model
from src.model.datamanager import Command
from src.model.datamanager import Contacts
# GMail
from src.functions.googlemail import VAMailGoogle
# Google Calendar
from src.functions.googlecalendar import VACalendarGoogle
# Weather
from src.functions.weather import VAWeather
# Time related functions
import datetime
import time
# Misc.
import random
class DBusService(object):
"""
<node>
<interface name='org.LinuxAssistantServer'>
<method name='client_init'>
<arg type='b' name='response' direction='out'/>
</method>
<method name='wakeup_call'>
</method>
<method name='echo_string'>
<arg type='s' name='a' direction='in'/>
<arg type='s' name='response' direction='out'/>
</method>
<method name='quit'/>
</interface>
</node>
"""
def client_init(self):
return True
def wakeup_call(self):
if VoiceAssistant.assistant_is_busy is False:
VoiceAssistant.wakeup_response()
else:
print("assistant is busy")
def echo_string(self, s):
"""returns whatever is passed to it"""
print(s)
return s
class VoiceAssistant(object):
assistant_is_busy = False
@staticmethod
def say(text, lang='ru', client=None):
mp3_fp = BytesIO()
tts = gTTS(text, lang)
tts.write_to_fp(mp3_fp)
text_audio = AudioSegment.from_file(BytesIO(mp3_fp.getvalue()), format="mp3")
if client is not None:
client.print_text(text, False)
play(text_audio)
@staticmethod
def recognize(lang='ru-RU', client=None):
recognizer = sr.Recognizer()
mic = sr.Microphone()
with mic as source:
recognizer.dynamic_energy_threshold = True
recognizer.adjust_for_ambient_noise(source, duration=2)
if client is not None:
client.print_text("Говорите...", False)
print("Говорите...")
audio = recognizer.listen(source)
if client is not None:
client.print_text("Распознаю...", False)
print("Распознаю...")
query = recognizer.recognize_google(audio, language=lang)
if client is not None:
client.print_text(query, True)
return query
@staticmethod
def identify_command(cmd_text):
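# Fuzzy-matches the recognized text against every trigger phrase and returns the identifier of the
# best-scoring command.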
identified_command = {'cmd': '', 'percent': 0}
commands = Command.get_commands()
for identifier, triggers_vector in commands:
for string in triggers_vector:
fuzzy_ratio = fuzz.ratio(cmd_text, string)
if fuzzy_ratio > identified_command['percent']:
identified_command['cmd'] = identifier
identified_command['percent'] = fuzzy_ratio
return identified_command['cmd']
@staticmethod
def identify_date(voice_input):
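# Expects either the word 'сегодня' (today) or a '<day> <month> <year>' phrase; the month word is
# fuzzy-matched against the Russian month names below.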
if voice_input == 'сегодня':
today = datetime.datetime.now().date()
return {
'day': today.day,
'month': today.month,
'year': today.year
}
else:
print(voice_input)
voice_input = voice_input.split()
months = ['январь', 'февраль', 'март', 'апрель', 'май', 'июнь', 'июль', 'август', 'сентябрь', 'октябрь', 'ноябрь', 'декабрь']
identified_month = {'month': '', 'percent': 0}
for month in months:
fuzzy_ratio = fuzz.ratio(voice_input[1], month)
if fuzzy_ratio > identified_month['percent']:
identified_month['percent'] = fuzzy_ratio
identified_month['month'] = month
day = voice_input[0]
month = str(months.index(identified_month['month']) + 1)
year = voice_input[2]
return {
'day': day,
'month': month,
'year': year
}
@staticmethod
def ask_event_info(dbus_client=None):
VoiceAssistant.say("Укажите дату вашей встречи, в формате число месяц год.", client=dbus_client)
voice_str = VoiceAssistant.recognize(client=dbus_client)
event_day = VoiceAssistant.identify_date(voice_str)
VoiceAssistant.say("Укажите время встречи.", client=dbus_client)
event_time = VoiceAssistant.recognize(client=dbus_client)
start_time = f'{event_day["year"]}-{event_day["month"]}-{event_day["day"]}T{event_time}'
start_time = datetime.datetime.strptime(start_time, "%Y-%m-%dT%H:%M")
VoiceAssistant.say("Сколько продлится встреча?", client=dbus_client)
event_duration = VoiceAssistant.recognize(client=dbus_client)
event_duration = datetime.datetime.strptime(event_duration, "%H:%M").time()
duration = datetime.timedelta(hours=event_duration.hour, minutes=event_duration.minute)
end_time = start_time + duration
start_time = str(start_time) + ':00+03:00'
end_time = str(end_time) + ':00+03:00'
VoiceAssistant.say("Как мне назвать встречу.", client=dbus_client)
event_name = VoiceAssistant.recognize(client=dbus_client)
add_description = VoiceAssistant.ask("Добавить ли описание?", client=dbus_client)
event_description = None
if add_description is True:
VoiceAssistant.say("Что добавить в описание встречи?", client=dbus_client)
event_description = VoiceAssistant.recognize(client=dbus_client)
add_attendees = VoiceAssistant.ask("Пригласить ли других участников?", client=dbus_client)
attendees = []
while add_attendees is True:
VoiceAssistant.say("Кого мне добавить к этой встрече?", client=dbus_client)
voice_str = VoiceAssistant.recognize(client=dbus_client)
contact_info = VoiceAssistant.identify_contact(voice_str)
attendees.append({
'email': contact_info['email']
})
add_attendees = VoiceAssistant.ask("Пригласить ли других участников?", client=dbus_client)
return {
'summary': event_name,
'start_time': str(start_time),
'end_time': str(end_time),
'description': event_description,
'attendees': attendees
}
@staticmethod
def ask_email_info(dbus_client=None):
VoiceAssistant.say("Кому отправить письмо?", client=dbus_client)
voice_input = VoiceAssistant.recognize(client=dbus_client)
contact = VoiceAssistant.identify_contact(voice_input)
VoiceAssistant.say("Какая тема письма?", client=dbus_client)
subject = VoiceAssistant.recognize(client=dbus_client)
VoiceAssistant.say("Содержание письма?",client=dbus_client)
message = VoiceAssistant.recognize(client=dbus_client)
return contact['name'], contact['email'], subject, message
@staticmethod
def ask(question, client=None):
VoiceAssistant.say(question, client=client)
while True:
response = VoiceAssistant.recognize(client=client).lower()
print(f'response: {response}')
if response == "да":
return True
elif response == "нет":
return False
else:
VoiceAssistant.say("Прошу прощения, но я вас не расслышала, не могли бы повторить?", client=client)
@staticmethod
def identify_contact(input_str):
identified_contact = {'name': '', 'percent': 0}
contacts = Contacts.get_contacts()
for name in contacts:
fuzzy_ratio = fuzz.ratio(name, input_str)
if fuzzy_ratio > identified_contact['percent']:
identified_contact['name'] = name
identified_contact['percent'] = fuzzy_ratio
name = identified_contact['name']
email = contacts[name]
return {
'name': name,
'email': email
}
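    # execute_command dispatches on the identifiers produced by identify_command:
    # unread_email, send_email, events_day, add_event, time, weather, help.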
@staticmethod
def execute_command(cmd_str, dbus_client=None):
if cmd_str == 'unread_email':
gmail = VAMailGoogle()
messages = gmail.get_unread('in:inbox is:unread')
VoiceAssistant.say(f"У вас {len(messages)} непрочитанных сообщений", client=dbus_client)
# response = None
# if dbus_client is not None:
response = VoiceAssistant.ask("Хотите, чтобы я прочитала от кого они и темы писем?", client=dbus_client)
# else:
# response = VoiceAssistant.ask("Хотите, чтобы я прочитала от кого они и темы писем?")
if response is True:
for message in messages:
date = message['Date']
date = utc_to_local(datetime.datetime.strptime(date, '%d %b %Y %X %z'))
date_str = '{:"%d/%m/%Y"}'.format(date)
                    VoiceAssistant.say(f"{date_str} в {date.time()} вам пришло письмо от {message['From']} с темой {message['Subject']}")
elif response is False:
if dbus_client is not None:
VoiceAssistant.say("Хорошо.", client=dbus_client)
else:
VoiceAssistant.say("Хорошо.")
if cmd_str == 'send_email':
gmail = VAMailGoogle()
name, email_address, subject, message = VoiceAssistant.ask_email_info(dbus_client=dbus_client)
print(email_address, subject, message)
            VoiceAssistant.say(f'Вы хотите отправить письмо контакту {name}, с темой {subject}, и содержанием {message}. Все верно?', client=dbus_client)
resp = VoiceAssistant.recognize(client=dbus_client)
if resp == "да":
raw_email = gmail.create_email(email_address, subject, message)
gmail.send_email(raw_email)
dbus_client.print_text("Письмо было отправлено", False)
print("Email has been sent.")
else:
dbus_client.print_text("Письмо не было отправлено", False)
print("Email has not been sent")
if cmd_str == 'events_day':
gcal = VACalendarGoogle()
today = datetime.date.today()
events = gcal.get_events_on_a_day(today)
if events != []:
VoiceAssistant.say(f"На сегодня у вас запланировано {len(events)} событий.", client=dbus_client)
for event in events:
event_date_info = event["Start"]["dateTime"]
event_date_info = event_date_info[:event_date_info.index('+')]
event_time = datetime.datetime.strptime(event_date_info, "%Y-%m-%dT%H:%M:%S").time()
VoiceAssistant.say(f"В {event_time} у вас {event['Summary']}", client=dbus_client)
if cmd_str == 'add_event':
gcal = VACalendarGoogle()
data = VoiceAssistant.ask_event_info(dbus_client=dbus_client)
send_invites = 'none'
if data['attendees'] != []:
send_invites = 'all'
VoiceAssistant.say(f'Вы хотите добавить событие с названием {data["summary"]}. Верно?', client=dbus_client)
resp = VoiceAssistant.recognize(client=dbus_client)
if resp == "да":
gcal.add_event(data['summary'], data['start_time'], data['end_time'], send_invites,
description=data['description'], attendees=data['attendees'])
dbus_client.print_text("Встреча была создана", False)
print("Event has been created.")
else:
dbus_client.print_text("Создание встречи отменено", False)
print("Event has not been created")
if cmd_str == 'time':
now = datetime.datetime.now()
if dbus_client is not None:
VoiceAssistant.say(f'Сейчас {now.strftime("%H")}:{now.strftime("%M")}', client=dbus_client)
else:
VoiceAssistant.say(f'Сейчас {now.hour}:{now.minute}')
if cmd_str == 'weather':
weather = VAWeather()
if dbus_client is not None:
VoiceAssistant.say(weather.get_weather(), client=dbus_client)
else:
VoiceAssistant.say(weather.get_weather())
if cmd_str == 'help':
if dbus_client is not None:
VoiceAssistant.say("Я могу помочь вам узнать погоду и время."
" Также я могу отправить сообщение по почте "
"и создать событие в календаре", client=dbus_client)
@staticmethod
def greeter():
greetings = [
'Чем могу помочь?',
'Я вас слушаю...'
]
return random.choice(greetings)
@staticmethod
def wakeup_response():
# Make threading safe
lock = threading.Lock()
lock.acquire()
VoiceAssistant.assistant_is_busy = True
dbus_client = None
try:
client_bus = SessionBus()
dbus_client = client_bus.get("org.LinuxAssistantClient")
        except Exception:
print("[log] Can't connect to the client")
VoiceAssistant.say(VoiceAssistant.greeter(), client=dbus_client)
voice_cmd = None
try:
voice_cmd = VoiceAssistant.recognize(client=dbus_client)
except sr.UnknownValueError:
print("[log] Голос не распознан!")
except sr.RequestError as e:
print("[log] Неизвестная ошибка, проверьте интернет!")
if voice_cmd is not None:
print("[log] Распознано: " + voice_cmd)
voice_cmd = voice_cmd.lower()
for word in opts['alias']:
voice_cmd = voice_cmd.replace(word, '').strip()
for word in opts['tbr']:
voice_cmd = voice_cmd.replace(word, '').strip()
cmd = VoiceAssistant.identify_command(voice_cmd)
VoiceAssistant.execute_command(cmd, dbus_client=dbus_client)
lock.release()
VoiceAssistant.assistant_is_busy = False
opts = {
"alias": ('алиса', 'алисочка', 'леся'),
"tbr": ('скажи', 'расскажи', 'покажи', 'сколько', 'произнеси')
}
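# Convert a timezone-aware UTC datetime to local wall-clock time by measuring the
# local/UTC offset for that timestamp (used for the e-mail "Date" headers above).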
def utc_to_local(utc):
epoch = time.mktime(utc.timetuple())
offset = datetime.datetime.fromtimestamp(epoch) - datetime.datetime.utcfromtimestamp(epoch)
return utc + offset
def detected_callback():
if VoiceAssistant.assistant_is_busy is False:
VoiceAssistant.wakeup_response()
else:
print("assistant is busy")
if __name__ == '__main__':
server_bus = SessionBus()
server_bus.publish("org.LinuxAssistantServer", DBusService())
loop = GLib.MainLoop()
thread = threading.Thread(target=loop.run)
thread.daemon = True
thread.start()
detector = snowboydecoder.HotwordDetector(SNOWBOY_MODEL_PATH, sensitivity=0.5, audio_gain=1)
thread2 = threading.Thread(target=detector.start, kwargs=dict(detected_callback=detected_callback, recording_timeout=30))
thread2.start()
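    # At this point two long-lived threads are running: a daemon GLib main loop
    # serving the DBus interface above, and the snowboy hotword detector, whose
    # detected_callback() triggers wakeup_response() when the hotword is heard.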
|
hyperlink_preview.py
|
"""
Parse URL preview data (based on the Open Graph protocol, but not only).
Instantiate a HyperLinkPreview object to use it.
"""
import logging
import queue
from threading import Thread, Lock, Event
from typing import Dict, Optional
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
from . import utils
from . import image_size
logger = logging.getLogger('hyperlinkpreview')
class HyperLinkPreview:
"""
    Class to parse URL preview data (based on the Open Graph protocol, but not only).
    Warning: the constructor raises if the url is not accessible: handle it.
"""
properties = ['title', 'type', 'image', 'url', 'description', 'site_name']
def __init__(self, url:str):
"""
Raises:
- requests.exceptions.RequestException: if cannot get url
            - ValueError: if url is empty or None
"""
self.data_lock = Lock()
self.is_valid = False
self.full_parsed = Event()
self._datas: Dict[str, Optional[str]] = \
{property: None for property in HyperLinkPreview.properties}
if url is None or not url:
raise ValueError("url is None")
_html = self._fetch(url)
if logger.getEffectiveLevel() <= logging.DEBUG:
logger.debug(f"fetched html size: {len(_html)}")
self.link_url = url
self._parse(_html)
def get_data(self, wait_for_imgs=True):
"""
Args:
            wait_for_imgs: - if True, waits for the image parse before returning.
                             (The image parse happens when no image info is found in the head and the whole html must be scanned for img tags.)
                           - if False, returns without waiting. The caller should check the 'image' value in the returned dict;
                             if it is None, another call to this method with wait_for_imgs=True is required to get the image.
        Returns:
            The data dict (a copy). Keys are ['title', 'type', 'image', 'url', 'description', 'site_name']
"""
        if wait_for_imgs:
            self.full_parsed.wait()
        with self.data_lock:
            return self._datas.copy()
def _fetch(self, url: str) -> str:
"""
Returns:
the html content of the given url
Raises:
requests.exceptions.RequestException: If cannot get url.
"""
try:
return requests.get(url).text
except requests.exceptions.RequestException as ex:
            logger.error("Cannot fetch url [%s]: [%s]", url, ex)
raise ex
def _parse(self, html):
"""
First parse og tags, then search deeper if some tags were not present.
"""
if not html:
self.full_parsed.set()
return
i = 0
html_len = len(html)
skip_chars = ["\n", "\r", "\t", " "]
while i < html_len and html[i] in skip_chars:
i += 1
        if html[i] != "<" and html[i + 1] != "<":
self.full_parsed.set()
return
with self.data_lock:
soup = BeautifulSoup(str(html), "html.parser")
self.is_valid = True
metas = soup.findAll("meta")
for one_meta_tag in metas:
_property = utils.has_og_property(one_meta_tag, HyperLinkPreview.properties)
if _property:
try:
self._datas[_property] = one_meta_tag["content"]
except: # pylint: disable=bare-except
pass # don't care if meta tag has no "content" attribute.
self._parse_deeper_url()
self._parse_deeper_domain()
self._parse_deeper_site_name()
self._parse_deeper_title(soup)
self._parse_deeper_description(soup)
self._parse_deeper_image(soup)
def _parse_deeper_url(self):
url = self._datas["url"]
if url:
return
self._datas["url"] = self.link_url
def _parse_deeper_domain(self):
url = self._datas["url"]
        domain = urlparse(url).netloc
self._datas["domain"] = str(domain)
def _parse_deeper_site_name(self):
name = self._datas["site_name"]
if name:
return
domain = self._datas["domain"]
if not domain:
return
name = domain.replace("www.", "")
try:
name = name[0:name.rindex(".")]
except: # pylint: disable=bare-except
pass
self._datas["site_name"] = name
def _parse_deeper_title(self, soup: BeautifulSoup):
title = self._datas["title"]
if title:
return
title_tag = soup.find('title')
if title_tag:
self._datas["title"] = title_tag.text
return
title_tag = soup.find('h1')
if title_tag:
self._datas["title"] = title_tag.text
return
title_tag = soup.find('h2')
if title_tag:
self._datas["title"] = title_tag.text
return
def _parse_deeper_description(self, soup: BeautifulSoup):
"""
        If no description was found in the og/meta tags, search for one in:
         - <meta name="description">
         - then: the first 1000 characters of visible text in <p> tags
           (the twitter:description fallback is deliberately disabled, see the comment below).
"""
description = self._datas["description"]
if description:
return
description_meta_tag = soup.find('meta', {"name": "Description"})
if description_meta_tag:
self._datas["description"] = str(description_meta_tag["content"]) # type: ignore
return
        # usually twitter descriptions are for subscription: they are not a description of the page.
# description_meta_tag = soup.find('meta', {"name": "twitter:description"})
# if description_meta_tag:
# self.datas["description"] = description_meta_tag["content"]
# return
# let's take the visible text from <p>:
p_tags = soup.findAll('p')
visible_p_tags = list(filter(utils.tag_visible, p_tags))
visible_text = " ".join(one_p.text.strip() for one_p in visible_p_tags)
visible_text = visible_text.replace("\n", " ")
visible_text = ' '.join(visible_text.split()) # remove multiple spaces
self._datas["description"] = visible_text[0:1000]
def _parse_deeper_image(self, soup):
image = self._datas["image"]
if image:
self.full_parsed.set()
return
image_tag = soup.find('link', {"rel": "image_src"})
if image_tag:
self._datas["image"] = image_tag["href"]
self.full_parsed.set()
return
# No image info provided. We'll search for all images, in a dedicated thread:
Thread(target=self._parse_deeper_image_in_tags, args=[soup]).start()
def _parse_deeper_image_in_tags(self, soup):
try:
src_queue = queue.Queue()
img_tags = soup.findAll("img")
candidates = image_size.ImageDataList()
for one_tag in img_tags:
try:
src = one_tag["src"]
except: # pylint: disable=bare-except
continue
src = utils.get_img_url(src, utils.get_base_url(self.link_url))
if src is None:
continue
src_queue.put(src)
for _ in range(16):
Thread(target=self.fetch_image_size, args=[src_queue, candidates], daemon=True).start()
src_queue.join()
with self.data_lock:
self._datas["image"] = candidates.get_best_image()
finally:
self.full_parsed.set()
def fetch_image_size(self, src_queue, candidates: image_size.ImageDataList):
"""
Args:
src_queue: the queue containing all urls to image to fetch and get size.
candidates: the list to append images
"""
try:
while True:
src = src_queue.get(block=False)
# logging.debug(f"Start processing {src}")
try: # important to avoid dead lock of queue join.
with requests.get(src, stream=True) as response:
if response.status_code == 200:
width, height = image_size.get_size(response)
# logging.debug(f"Processing {src}: width: [{width}]")
if width != -1:
candidates.append(image_size.ImageSize(src, width, height))
except: # pylint: disable=bare-except
# logging.debug(f"End processing {src}: exception")
pass
finally:
src_queue.task_done()
except queue.Empty:
# logging.debug(f"End processing: Queue empty")
pass
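# --- Added usage sketch (not part of the original module). It assumes the package is
# --- importable (this file uses relative imports, so it cannot run standalone) and
# --- that the example URL is reachable; names below are placeholders:
#
#     hlp = HyperLinkPreview("https://example.com")
#     quick = hlp.get_data(wait_for_imgs=False)   # 'image' may still be None here
#     full = hlp.get_data(wait_for_imgs=True)     # blocks until image parsing is done
#     print(full["title"], full["site_name"], full["image"])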
|
spark_monitor.py
|
from ..utils import filter_dict, load_json, join_url, merge_dict, subtract_dicts
from threading import Thread, Lock
from requests import post
from socket import gethostname, gethostbyname
from time import sleep
from json import dumps
class SparkMonitor:
def __init__(self, addr, port, url="spark-master:4040", interval=5, check_interval=.4):
self.__url = f"http://{url}/api/v1/applications"
self.__addr = f"http://{addr}:{port}"
self.__header = {"from": "spark_monitor"}
self.__interval = interval
self.__check_interval = check_interval
self.__mutex = Lock()
self.__data = []
self.__ip = gethostbyname(gethostname())
self.__app_id = self.__get_app_id()
def __get_app_id(self):
""" Get Spark app ID
Path -> <ip>:<port>/api/v1/applications/
"""
tmp = []
try:
tmp = load_json(self.__url)[0]["id"]
except Exception as e:
print(e)
return tmp
def __get_stage_info(self):
""" Get stage info of app
Path -> <ip>:<port>/api/v1/applications/<app_id>/stages/<stage_id>
"""
return load_json(join_url(self.__url, self.__app_id, "stages"))
def __get_executor_info(self):
""" Get executor info based on worker IP
            Path -> <ip>:<port>/api/v1/applications/<app_id>/executors
"""
executors = load_json(join_url(self.__url, self.__app_id, "executors"))
return list(filter(lambda executor: self.__ip in executor["hostPort"], executors))
    def __get_info(self, method):
        """ Calls the given method when the app ID is known; otherwise refreshes app_id (and returns None) """
if not self.__app_id:
self.__app_id = self.__get_app_id()
else:
return method()
def __get_cpu_usage(self):
""" Checks the REST API provided by Spark and gets the CPU usage"""
while True:
try:
stage = self.__get_stage_info()[1]
status = stage["status"]
if status == "ACTIVE":
cpu = stage["executorCpuTime"]
print(cpu)
if cpu:
with self.__mutex:
self.__data.append(cpu)
sleep(self.__check_interval)
except Exception as e:
print(e, "getcpuusage")
def __get_data(self):
""" Retrieves data from Spark REST API as dictionary """
filters = [
"executorCpuTime", "totalShuffleWrite", "totalShuffleRead",
"totalInputBytes", "memoryUsed", "totalGCTime"
]
try:
cpu_usage = 0
executor = filter_dict(self.__get_info(self.__get_executor_info)[0], filters)
sleep(self.__interval)
executor_new = filter_dict(self.__get_info(self.__get_executor_info)[0], filters)
executor = subtract_dicts(executor, executor_new)
cpu_usage = sum(set(self.__data)) / len(set(self.__data)) if self.__data else 0
with self.__mutex:
self.__data = []
return merge_dict({"executorCpuTime": cpu_usage}, executor)
except Exception as e:
print(e)
def start(self):
""" Starts SparkMonitor and post retrieved data to collector.py """
while not self.__app_id:
sleep(0.1)
self.__app_id = self.__get_app_id()
t0 = Thread(target=self.__get_cpu_usage)
t0.start()
while True:
data = self.__get_data()
print(data)
try:
if data:
post(self.__addr, json=dumps(
data), headers=self.__header, timeout=0.8)
except Exception as e:
print(e)
print("Connection refused. Retrying...")
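# --- Added usage sketch (assumptions marked): the collector address/port are
# --- placeholders, and the module must be imported from its package because of the
# --- relative "..utils" import above:
#
#     monitor = SparkMonitor(addr="collector-host", port=5000,
#                            url="spark-master:4040", interval=5)
#     monitor.start()   # blocks: samples the Spark REST API and POSTs metrics forever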
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1040
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if __name__ != "__main__":
spinner.close()
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
        # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
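# each value is either a python module path (started through selfdrive.launcher) or a
# (directory relative to BASEDIR, argv) tuple for a native binary started with execvp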
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringd',
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
'rtshield',
]
# starting dmonitoringmodeld when modeld is initializing can sometimes
# result in a weird snpe state where dmon constantly uses more cpu than normal.
car_started_processes += ['modeld']
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
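# example (added; "mapd" is a hypothetical external process, not part of this file):
#   register_managed_process("mapd", "selfdrive.mapd.mapd", car_started=True)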
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
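# Daemon processes (athenad) outlive manager: the stored pid is probed with
# os.kill(pid, 0) and /proc/<pid>/cmdline before a new instance is spawned.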
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /sdcard/unkillable_reboot")
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
EnableLogger = int(params.get('OpkrEnableLogger'))
if not EnableLogger:
    car_started_processes.remove('loggerd')
    persistent_processes.remove('logmessaged')
    persistent_processes.remove('uploader')
    persistent_processes.remove('logcatd')
    persistent_processes.remove('updated')
    persistent_processes.remove('deleter')
    persistent_processes.remove('tombstoned')
else:
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "1"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoShutdown", "3"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightness", "0"),
("OpkrEnableDriverMonitoring", "1"),
("OpkrEnableLogger", "0"),
("OpkrEnableGetoffAlert", "1"),
("OpkrAutoResume", "1"),
("OpkrVariableCruise", "0"),
("OpkrLaneChangeSpeed", "60"),
("OpkrAutoLaneChangeDelay", "0"),
("OpkrSteerAngleCorrection", "0"),
("PutPrebuiltOn", "0"),
("FingerprintIssuedFix", "0"),
("LdwsCarFix", "0"),
("LateralControlMethod", "0"),
("CruiseStatemodeSelInit", "1"),
("InnerLoopGain", "30"),
("OuterLoopGain", "20"),
("TimeConstant", "10"),
("ActuatorEffectiveness", "15"),
("Scale", "1750"),
("LqrKi", "10"),
("DcGain", "30"),
("IgnoreZone", "0"),
("PidKp", "20"),
("PidKi", "40"),
("PidKf", "5"),
("CameraOffsetAdj", "60"),
("SteerRatioAdj", "135"),
("SteerActuatorDelayAdj", "35"),
("SteerRateCostAdj", "50"),
("SteerLimitTimerAdj", "80"),
("TireStiffnessFactorAdj", "75"),
("SteerMaxAdj", "380"),
("SteerMaxBaseAdj", "255"),
("SteerDeltaUpAdj", "3"),
("SteerDeltaDownAdj", "7"),
("SteerMaxvAdj", "10"),
("OpkrBatteryChargingControl", "1"),
("OpkrBatteryChargingMin", "70"),
("OpkrBatteryChargingMax", "80"),
("OpkrUiOpen", "0"),
("OpkrDriveOpen", "0"),
("OpkrTuneOpen", "0"),
("OpkrControlOpen", "0"),
("LeftCurvOffsetAdj", "0"),
("RightCurvOffsetAdj", "0"),
("DebugUi1", "0"),
("DebugUi2", "0"),
("OpkrBlindSpotDetect", "0"),
("OpkrMaxAngleLimit", "90"),
("OpkrAutoResumeOption", "0"),
("OpkrAngleOffsetSelect", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
tutorial_mscolab.py
|
"""
mss.tutorials.tutorial_mscolab
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This python script generates an automatic demonstration of how to use Mission Support System Collaboration (Mscolab),
which lets users collaborate in flight planning, and thereby explains how to use its various functionalities.
This file is part of mss.
:copyright: Copyright 2021 Hrithik Kumar Verma
:copyright: Copyright 2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyautogui as pag
import multiprocessing
import sys
import os.path
from sys import platform
from pyscreeze import ImageNotFoundException
from tutorials import screenrecorder as sr
from mslib.msui import mss_pyui
def initial_ops():
"""
Executes the initial operations such as closing all opened windows and showing the desktop.
"""
pag.sleep(5)
if platform in ('linux', 'linux2'):
pag.hotkey('winleft', 'd')
print("\n INFO : Automation is running on Linux system..\n")
elif platform == "darwin":
pag.hotkey('option', 'command', 'm')
print("\n INFO : Automation is running on Mac OS..\n")
elif platform == "win32":
pag.hotkey('win', 'd')
print("\nINFO : Automation is running on Windows OS..\n")
else:
pag.alert(text="Sorry, no support on this platform!", title="Platform Exception", button='OK')
def call_recorder():
"""
Calls the screen recorder class to start the recording of the automation.
"""
rec = sr.ScreenRecorder()
rec.capture()
rec.stop_capture()
def call_mss():
"""
Calls the main MSS GUI window since operations are to be performed on it only.
"""
mss_pyui.main()
def automate_mscolab():
"""
    This is the main automation script for the Mission Support System Collaboration (Mscolab) tutorial. The run is
    recorded and saved to a file whose name carries the date and time of recording, with a .mp4 extension (codec).
"""
# Giving time for loading of the MSS GUI.
pag.sleep(5)
# Platform specific things
if platform in ('linux', 'linux2'):
enter = 'enter'
mscolab_path = 'pictures/mscolab/linux/'
win = 'winleft'
ctrl = 'ctrl'
alt = 'altleft'
elif platform == 'win32':
enter = 'enter'
mscolab_path = 'pictures/mscolab/linux/'
win = 'win'
ctrl = 'ctrl'
alt = 'alt'
elif platform == 'darwin':
enter = 'return'
mscolab_path = 'pictures/mscolab/linux/'
ctrl = 'command'
# Different inputs required in mscolab
username = 'John Doe'
email = 'johndoe@gmail.com'
password = 'johndoe'
    p_name = 'operation_of_john_doe'
    p_description = """This is John Doe's operation. He wants his colleagues and friends to collaborate on this operation
    with him in the network. Mscolab, here, will be very helpful for Joe with various features to use!"""
chat_message1 = 'Hi buddy! What\'s the next plan? I have marked the points in topview for the dummy operation.' \
'Just have a look, please!'
chat_message2 = 'Hey there user! This is the chat feature of MSCOLAB. You can have a conversation with your ' \
'fellow mates about the operation and discuss ideas and plans.'
search_message = 'chat feature of MSCOLAB'
localhost_url = 'http://localhost:8083'
# Example upload of mss logo during Chat Window demonstration.
path = os.path.normpath(os.getcwd() + os.sep + os.pardir)
example_image_path = os.path.join(path, 'docs/mss-logo.png')
file_x, file_y = None, None
open_operations_x, open_operations_y = None, None
selectall_left_x, selectall_left_y = None, None
selectall_right_x, selectall_right_y = None, None
modify_x, modify_y = None, None
previous_x, previous_y = None, None
work_async_x, work_async_y = None, None
wp1_x, wp1_y = None, None
wp2_x, wp2_y = None, None
# Maximizing the window
try:
pag.hotkey('ctrl', 'command', 'f') if platform == 'darwin' else pag.hotkey(win, 'up')
except Exception:
print("\nException : Enable Shortcuts for your system or try again!")
pag.sleep(4)
# Connecting to Mscolab (Mscolab localhost server must be activated beforehand for this to work)
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}connect_to_mscolab.png')
pag.sleep(1)
pag.click(x, y, duration=2)
pag.sleep(2)
# Entering local host URL
try:
x1, y1 = pag.locateCenterOnScreen(f'{mscolab_path}connect.png')
pag.click(x1 - 100, y1, duration=2)
pag.sleep(1)
pag.hotkey(ctrl, 'a')
pag.sleep(1)
pag.typewrite(localhost_url, interval=0.2)
pag.sleep(1)
pag.click(x1, y1, duration=2)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Connect (to localhost)\' button not found on the screen.")
# Adding a new user
try:
x2, y2 = pag.locateCenterOnScreen(f'{mscolab_path}add_user.png')
pag.click(x2, y2, duration=2)
pag.sleep(4)
# Entering details of new user
new_user_input = [username, email, password, password]
for input in new_user_input:
pag.typewrite(input, interval=0.2)
pag.sleep(1)
pag.press('tab')
pag.sleep(2)
pag.press('tab')
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
if pag.locateCenterOnScreen(f'{mscolab_path}emailid_taken.png') is not None:
print("The email id you have provided is already registered!")
pag.sleep(1)
pag.press('left')
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
# Entering details of the new user that's created
pag.press('tab', presses=2, interval=1)
pag.typewrite(email, interval=0.2)
pag.press('tab')
pag.typewrite(password, interval=0.2)
try:
x3, y3 = pag.locateCenterOnScreen(f'{mscolab_path}login.png')
pag.click(x3, y3, duration=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Login (User login)\' button not found on the screen.")
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Add user\' button not found on the screen.")
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Connect to Mscolab\' button not found on the screen.")
# Opening a new Mscolab Operation
try:
file_x, file_y = pag.locateCenterOnScreen(f'{mscolab_path}file.png')
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=2)
for _ in range(2):
pag.press('down')
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
pag.press('tab')
for input in [p_name, p_description]:
pag.typewrite(input, interval=0.05)
pag.press('tab')
pag.sleep(2)
try:
x1, y1 = pag.locateCenterOnScreen(f'{mscolab_path}addop_ok.png')
pag.moveTo(x1, y1, duration=2)
pag.click(x1, y1, duration=2)
pag.sleep(2)
pag.press(enter)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Ok\' button when adding a new operation not found on the screen.")
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'File\' menu button not found on the screen.")
try:
open_operations_x, open_operations_y = pag.locateCenterOnScreen(f'{mscolab_path}openop.png')
pag.moveTo(open_operations_x, open_operations_y + 20, duration=2)
pag.sleep(1)
pag.doubleClick(open_operations_x, open_operations_y + 20, duration=2)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Operations\' label not found on the screen.")
# Managing Users for the operation that you are working on
if file_x is not None and file_y is not None:
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=2)
pag.press('right', presses=2, interval=2)
pag.press('down', presses=2, interval=2)
pag.press(enter)
pag.sleep(3)
else:
print('Image not Found : File menu not found (while managing users)')
# Demonstrating search and select of the users present in the network.
try:
selectall_left_x, selectall_left_y = pag.locateCenterOnScreen(f'{mscolab_path}manageusers_left_selectall.png')
pag.moveTo(selectall_left_x, selectall_left_y, duration=2)
pag.click(selectall_left_x, selectall_left_y, duration=1)
pag.sleep(2)
pag.moveTo(selectall_left_x + 90, selectall_left_y, duration=2)
pag.click(selectall_left_x + 90, selectall_left_y, duration=1)
pag.sleep(2)
pag.click(selectall_left_x - 61, selectall_left_y, duration=1)
pag.typewrite('test', interval=1)
pag.moveTo(selectall_left_x, selectall_left_y, duration=2)
pag.click(duration=2)
pag.sleep(1)
pag.moveTo(selectall_left_x + 90, selectall_left_y, duration=2)
pag.click(duration=2)
pag.sleep(2)
# Deleting search item from the search box
pag.click(selectall_left_x - 61, selectall_left_y, duration=2)
pag.sleep(1)
pag.hotkey(ctrl, 'a')
pag.sleep(1)
pag.press('backspace')
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Select All\' leftside button not found on the screen.")
# Selecting and adding users for collaborating in the operation.
if selectall_left_x is not None and selectall_left_y is not None:
row_gap = 30
pag.moveTo(selectall_left_x, selectall_left_y + 57, duration=1)
pag.click(selectall_left_x, selectall_left_y + 57, duration=1)
for _ in range(3):
pag.move(None, row_gap, duration=1)
pag.click(duration=1)
pag.sleep(2)
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}manageusers_add.png')
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Add (all the users)\' button not found on the screen.")
else:
print('Not able to select users for adding')
# Searching and changing user permissions and deleting users
try:
selectall_right_x, selectall_right_y = pag.locateCenterOnScreen(f'{mscolab_path}manageusers_right_selectall.png'
)
pag.moveTo(selectall_right_x - 170, selectall_right_y, duration=2)
pag.click(selectall_right_x - 170, selectall_right_y, duration=2)
pag.typewrite('risehr', interval=0.3)
pag.sleep(1)
pag.hotkey(ctrl, 'a')
pag.press('backspace')
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Select All (modifying permissions)\' button not found on the screen.")
# Selecting and modifying user roles
if selectall_right_x is not None and selectall_right_y is not None:
row_gap = 30
for i in range(3):
pag.moveTo(selectall_right_x, selectall_right_y + 56, duration=1)
pag.move(None, row_gap * (i + 1), duration=1)
pag.click(duration=1)
pag.sleep(2)
try:
modify_x, modify_y = pag.locateCenterOnScreen(f'{mscolab_path}manageusers_modify.png')
pag.click(modify_x - 141, modify_y, duration=2)
if i == 0:
pag.press('up', presses=2)
else:
pag.press('down')
pag.sleep(1)
pag.press(enter)
pag.sleep(1)
pag.click(modify_x, modify_y, duration=2)
pag.sleep(1)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Modify (access permissions)\' button not found on the screen.")
# Deleting the first user in the list
pag.moveTo(selectall_right_x, selectall_right_y + 56, duration=1)
pag.click(selectall_right_x, selectall_right_y + 56, duration=1)
if modify_x is not None and modify_y is not None:
pag.moveTo(modify_x + 160, modify_y, duration=2)
pag.click(modify_x + 160, modify_y, duration=2)
pag.sleep(2)
else:
            print('Image Not Found: Modify button was previously not found on the screen')
# Filtering through access roles
pag.click(selectall_right_x - 82, selectall_right_y, duration=2)
pag.press('up', presses=3, interval=0.5)
pag.sleep(1)
for _ in range(3):
pag.click(selectall_right_x - 82, selectall_right_y, duration=2)
pag.press('down')
pag.sleep(1)
pag.press(enter)
pag.sleep(1)
pag.sleep(1)
else:
        print('Image Not Found: Select All button was previously not found on the screen')
# Closing user permission window
    pag.hotkey('command', 'w') if platform == 'darwin' else pag.hotkey(alt, 'f4')
pag.sleep(2)
# Demonstrating Chat feature of mscolab to the user
if file_x is not None and file_y is not None:
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=2)
pag.press('right', presses=2, interval=2)
pag.press(enter)
pag.sleep(3)
else:
print('Image not Found : File menu not found (while opening Chat window)')
    # Sending messages to collaborators or other users
pag.typewrite(chat_message1, interval=0.05)
pag.sleep(2)
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}chat_send.png')
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
pag.sleep(2)
pag.typewrite(chat_message2, interval=0.05)
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
# Uploading an example image of mss logo.
pag.moveTo(x, y + 40, duration=2)
pag.click(x, y + 40, duration=2)
pag.sleep(1)
pag.typewrite(example_image_path, interval=0.2)
pag.sleep(1)
pag.press(enter)
pag.sleep(1)
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Send (while in chat window)\' button not found on the screen.")
# Searching messages in the chatbox using the search bar
try:
previous_x, previous_y = pag.locateCenterOnScreen(f'{mscolab_path}chat_previous.png')
pag.moveTo(previous_x - 70, previous_y, duration=2)
pag.click(previous_x - 70, previous_y, duration=2)
pag.sleep(1)
pag.typewrite(search_message, interval=0.3)
pag.sleep(1)
pag.moveTo(previous_x + 82, previous_y, duration=2)
pag.click(previous_x + 82, previous_y, duration=2)
pag.sleep(2)
pag.moveTo(previous_x, previous_y, duration=2)
pag.click(previous_x, previous_y, duration=2)
pag.sleep(2)
except (ImageNotFoundException, OSError, Exception):
print("\nException :\'Previous (while in chat window searching operation)\' button not found on the screen.")
# Closing the Chat Window
pag.hotkey('command', 'w') if platform == 'darwin' else pag.hotkey(alt, 'f4')
pag.sleep(2)
# Opening Topview
if file_x is not None and file_y is not None:
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=2)
pag.sleep(1)
pag.press('right')
pag.sleep(1)
pag.press(enter)
pag.sleep(4)
# Adding some waypoints to topview
try:
x, y = pag.locateCenterOnScreen('pictures/add_waypoint.PNG')
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
pag.move(-50, 150, duration=1)
pag.click(interval=2)
wp1_x, wp1_y = pag.position()
pag.sleep(1)
pag.move(65, 65, duration=1)
pag.click(duration=2)
wp2_x, wp2_y = pag.position()
pag.sleep(1)
pag.move(-150, 30, duration=1)
pag.click(duration=2)
pag.sleep(1)
pag.move(180, 100, duration=1)
pag.click(duration=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : Add waypoint (in topview) button not found on the screen.")
# Closing the topview
pag.hotkey('command', 'w') if platform == 'darwin' else pag.hotkey(alt, 'f4')
pag.press('left')
pag.sleep(1)
pag.press(enter)
pag.sleep(1)
# Opening version history window.
if file_x is not None and file_y is not None:
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=2)
pag.press('right', presses=2, interval=1)
pag.sleep(1)
pag.press('down')
pag.sleep(1)
pag.press(enter)
pag.sleep(1)
# Operations performed in version history window.
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}refresh_window.png')
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
pag.sleep(2)
pag.click(x, y + 32, duration=2)
pag.sleep(1)
pag.press('down')
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
pag.moveTo(x, y + 164, duration=1)
pag.click(x, y + 164, duration=1)
pag.sleep(4)
        # Giving this change a version name
try:
# Giving name to a change version.
x1, y1 = pag.locateCenterOnScreen(f'{mscolab_path}name_version.png')
pag.sleep(1)
pag.moveTo(x1, y1, duration=2)
pag.click(x1, y1, duration=2)
pag.sleep(1)
pag.typewrite('Initial waypoint', interval=0.3)
pag.sleep(1)
pag.press(enter)
pag.sleep(1)
pag.moveTo(x, y + 93, duration=1)
pag.click(x, y + 93, duration=1)
pag.sleep(2)
pag.moveTo(x, y + 125, duration=1)
pag.click(x, y + 125, duration=1)
pag.sleep(1)
# Checking out to a particular version
pag.moveTo(x1 + 95, y1, duration=2)
pag.click(x1 + 95, y1, duration=1)
pag.sleep(1)
pag.press('left')
pag.sleep(2)
pag.press(enter)
pag.sleep(2)
# Filtering changes to display only named changes.
pag.moveTo(x1 + 29, y1, duration=1)
pag.click(x1 + 29, y1, duration=1)
pag.sleep(1)
pag.press('up')
pag.sleep(1)
pag.press(enter)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
            print("\nException : Name Version (in version history window) button not found on the screen.")
except (ImageNotFoundException, OSError, Exception):
print("\nException : Refresh Window (in version history window) button not found on the screen.")
# Closing the Version History Window
pag.hotkey('command', 'w') if platform == 'darwin' else pag.hotkey(alt, 'f4')
pag.sleep(4)
# Activate Work Asynchronously with the mscolab server.
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}work_asynchronously.png')
pag.sleep(1)
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
work_async_x, work_async_y = pag.position()
pag.sleep(3)
# Opening Topview again to move waypoints during working locally!
if file_x is not None and file_y is not None:
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=2)
pag.press('right')
pag.sleep(1)
pag.press(enter)
pag.sleep(4)
# Moving waypoints.
try:
if wp1_x is not None and wp2_x is not None:
x, y = pag.locateCenterOnScreen('pictures/move_waypoint.PNG')
pag.click(x, y, interval=2)
pag.moveTo(wp2_x, wp2_y, duration=1)
pag.click(interval=2)
pag.dragRel(100, 150, duration=1)
pag.moveTo(wp1_x, wp1_y, duration=1)
pag.dragRel(35, -50, duration=1)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\n Exception : Move Waypoint button could not be located on the screen")
# Closing topview after displacing waypoints
pag.hotkey('command', 'w') if platform == 'darwin' else pag.hotkey(alt, 'f4')
pag.press('left')
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
# Saving to Server the Work that has been done asynchronously.
if work_async_x is not None and work_async_y is not None:
pag.moveTo(work_async_x + 600, work_async_y, duration=2)
pag.click(work_async_x + 600, work_async_y, duration=2)
pag.press('down', presses=2, interval=1)
pag.press(enter)
pag.sleep(3)
# Overwriting Server waypoints with Local Waypoints.
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}overwrite_waypoints.png')
pag.sleep(1)
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
pag.sleep(2)
pag.press(enter)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : Overwrite with local waypoints (during saving to server) button"
" not found on the screen.")
# Unchecking work asynchronously
pag.moveTo(work_async_x, work_async_y, duration=2)
pag.click(work_async_x, work_async_y, duration=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : Work Asynchronously (in mscolab) checkbox not found on the screen.")
# Activating a local flight track
if open_operations_x is not None and open_operations_y is not None:
pag.moveTo(open_operations_x - 900, open_operations_y + 20, duration=2)
pag.sleep(1)
pag.doubleClick(open_operations_x - 900, open_operations_y + 20, duration=2)
pag.sleep(2)
else:
print("Image Not Found : Open Operations label (for activating local flighttrack) not found, previously!")
# Opening Topview again and making some changes in it
if file_x is not None and file_y is not None:
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=2)
pag.sleep(1)
pag.press('right')
pag.sleep(1)
pag.press(enter)
pag.sleep(4)
    # Adding waypoints in a different fashion than the previous one (for the local flighttrack)
try:
x, y = pag.locateCenterOnScreen('pictures/add_waypoint.PNG')
pag.moveTo(x, y, duration=2)
pag.click(x, y, duration=2)
pag.move(-50, 150, duration=1)
pag.click(interval=2)
pag.sleep(1)
pag.move(65, 10, duration=1)
pag.click(duration=2)
pag.sleep(1)
pag.move(-100, 10, duration=1)
pag.click(duration=2)
pag.sleep(1)
pag.move(90, 10, duration=1)
pag.click(duration=2)
pag.sleep(3)
# Sending topview to the background
pag.hotkey('ctrl', 'up')
except (ImageNotFoundException, OSError, Exception):
print("\nException : Add waypoint (in topview again) button not found on the screen.")
# Activating the opened mscolab operation
if open_operations_x is not None and open_operations_y is not None:
pag.moveTo(open_operations_x, open_operations_y + 20, duration=2)
pag.sleep(1)
pag.doubleClick(open_operations_x, open_operations_y + 20, duration=2)
pag.sleep(3)
# Opening the topview again by double clicking on open views
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}openviews.png')
pag.moveTo(x, y + 22, duration=2)
pag.doubleClick(x, y + 22, duration=2)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : Open Views label not found on the screen.")
# Closing the topview
pag.hotkey('command', 'w') if platform == 'darwin' else pag.hotkey(alt, 'f4')
pag.press('left')
pag.sleep(1)
pag.press(enter)
pag.sleep(2)
else:
print("Image Not Found : Open Operations label (for activating mscolab operation) not found, previously!")
# Deleting the operation
if file_x is not None and file_y is not None:
pag.moveTo(file_x, file_y, duration=2)
pag.click(file_x, file_y, duration=1)
pag.sleep(1)
pag.press('right', presses=2, interval=1)
pag.sleep(1)
pag.press('down', presses=3, interval=1)
pag.press(enter, presses=2, interval=2)
pag.sleep(2)
pag.typewrite(p_name, interval=0.3)
pag.press(enter, presses=2, interval=2)
pag.sleep(3)
# Opening user profile
try:
x, y = pag.locateCenterOnScreen(f'{mscolab_path}johndoe_profile.png')
pag.moveTo(x + 32, y, duration=2)
pag.click(x + 32, y, duration=2)
pag.sleep(1)
pag.press('down')
pag.sleep(1)
pag.press(enter, presses=2, interval=2)
pag.sleep(2)
pag.click(x + 32, y, duration=2)
pag.sleep(1)
pag.press('down', presses=2, interval=2)
pag.press(enter)
pag.sleep(3)
except (ImageNotFoundException, OSError, Exception):
print("\nException : John Doe (in mscolab window) Profile/Logo button not found on the screen.")
print("\nAutomation is over for this tutorial. Watch next tutorial for other functions.")
# Close Everything!
try:
if platform == 'linux' or platform == 'linux2':
pag.hotkey('altleft', 'f4')
pag.sleep(2)
pag.press('left')
pag.sleep(1)
pag.press('enter')
pag.sleep(2)
pag.keyDown('altleft')
pag.press('tab')
pag.press('left')
pag.keyUp('altleft')
pag.press('q')
elif platform == 'win32':
pag.hotkey('alt', 'f4')
pag.sleep(2)
pag.press('left')
pag.sleep(1)
pag.press('enter')
pag.sleep(2)
pag.hotkey('alt', 'tab')
pag.press('q')
elif platform == 'darwin':
pag.hotkey('command', 'w')
pag.sleep(2)
pag.press('left')
pag.sleep(1)
pag.press('return')
pag.sleep(2)
pag.hotkey('command', 'tab')
pag.press('q')
except Exception:
print("Cannot automate : Enable Shortcuts for your system or try again")
def main():
"""
This function runs the above functions as different processes at the same time and can be
controlled from here. (This is the main process.)
"""
p1 = multiprocessing.Process(target=call_mss)
p2 = multiprocessing.Process(target=automate_mscolab)
p3 = multiprocessing.Process(target=call_recorder)
print("\nINFO : Starting Automation.....\n")
p3.start()
pag.sleep(3)
initial_ops()
p1.start()
p2.start()
p2.join()
p1.join()
p3.join()
print("\n\nINFO : Automation Completes Successfully!")
sys.exit()
if __name__ == '__main__':
main()
|
cluster_coordinator_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for coordinator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import os
import platform
import sys
import threading
import time
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.coordinator import cluster_coordinator as coordinator_lib
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training.server_lib import ClusterSpec
class CoordinatedClosureQueueTest(test.TestCase):
def testBasic(self):
queue = coordinator_lib._CoordinatedClosureQueue()
closure1 = self._create_closure(queue._cancellation_mgr)
queue.put(closure1)
self.assertIs(closure1, queue.get())
self.assertFalse(queue.done())
queue.put_back(closure1)
self.assertEqual(closure1, queue.get())
queue.mark_finished()
self.assertTrue(queue.done())
queue.wait()
def testProcessAtLeastOnce(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
labels = ['A', 'B', 'C', 'D', 'E']
processed_count = collections.defaultdict(int)
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
has_been_put_back = False
while True:
closure = closure_queue.get(timeout=30)
if closure is None:
break
if not has_been_put_back:
has_been_put_back = True
closure_queue.put_back(closure)
continue
closure._function()
closure_queue.mark_finished()
def get_func(label):
def func():
time.sleep(3)
processed_count[label] += 1
return func
cm = cancellation.CancellationManager()
for label in labels:
closure_queue.put(coordinator_lib.Closure(get_func(label), cm))
t1 = threading.Thread(target=process_queue, daemon=True)
t1.start()
t2 = threading.Thread(target=process_queue, daemon=True)
t2.start()
# Make sure multiple wait() calls are fine.
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
self.assertEqual(processed_count, collections.Counter(labels))
coord.join([t1, t2])
def testNotifyBeforeWait(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
def func():
logging.info('func running')
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
closure_queue.get()
closure_queue.mark_finished()
closure_queue.put(
coordinator_lib.Closure(func, closure_queue._cancellation_mgr))
t = threading.Thread(target=process_queue)
t.start()
coord.join([t])
# This test asserts that waiting at the time the function has been processed
# doesn't time out.
closure_queue.wait()
def _assert_one_unblock_the_other(self, first_fn, second_fn):
"""Asserts `second_fn` wouldn't return before `first_fn` is finished."""
first_fn_done = threading.Event()
second_fn_done = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def wrapped_first_fn():
with coord.stop_on_exception():
self.assertFalse(second_fn_done.is_set())
first_fn()
first_fn_done.set()
self.assertFalse(first_fn_done.is_set())
t = threading.Thread(target=wrapped_first_fn)
t.start()
second_fn()
self.assertTrue(first_fn_done.is_set())
second_fn_done.set()
coord.join([t])
def testWaitRaiseErrorAfterMarkFailure(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue = coordinator_lib._CoordinatedClosureQueue()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure = closure_queue.get()
wait_finish_event = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
# Using a thread to verify that closure_queue.wait() will not return until
# all inflight closures are finished.
def mark_finished_fn():
try:
raise ValueError('Some error.')
except ValueError as e:
closure_queue.mark_failed(e)
def wait_fn():
with self.assertRaises(ValueError):
closure_queue.wait()
self._assert_one_unblock_the_other(mark_finished_fn, wait_fn)
self.assertTrue(closure_queue.done())
def _create_closure(self, cancellation_mgr):
@def_function.function()
def some_function():
return 1.0
return coordinator_lib.Closure(some_function, cancellation_mgr)
def _put_two_closures_and_get_one(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
closure1 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure1)
closure2 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure2)
closure_got = closure_queue.get() # returns closure1
self.assertIs(closure_got, closure1)
self.assertIsNot(closure_got, closure2)
return closure_queue, closure1, closure2
def testPutRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2.output_remote_value.fetch()
# The error is cleared.
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
def testWaitRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.wait()
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2.output_remote_value.fetch()
# The error is cleared.
closure_queue.wait()
def testDoneRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertFalse(closure_queue.done())
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.done()
def _set_error(self, closure_queue, closure, error):
try:
raise error
except Exception as e: # pylint: disable=broad-except
closure.output_remote_value._set_error(e)
closure_queue.mark_failed(e)
def _test_cancel_closure_when_error(self, call_wait):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, closure1, closure2 = self._put_two_closures_and_get_one()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure_queue.get()
# At this moment, there are two inflight, one in queue.
self.assertEqual(closure_queue._inflight_closure_count, 2)
# Hold a copy of the queue's cancellation manager at this point
initial_cm = closure_queue._cancellation_mgr
# Simulating closure1 fails.
self._set_error(closure_queue, closure1, ValueError('Some error.'))
# At this moment, there are one inflight, one in queue.
self.assertEqual(closure_queue._queue.qsize(), 1)
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure3 = self._create_closure(closure_queue._cancellation_mgr)
def fake_cancellation():
self._set_error(closure_queue, closure2,
ValueError('Fake cancellation error.'))
def report_error():
# It should not report the fake cancellation error.
with self.assertRaisesRegex(ValueError, 'Some error.'):
# Verifying `wait()` or `put()` raises even if one closure is in
# flight.
if call_wait:
closure_queue.wait()
else:
closure_queue.put(closure3)
self._assert_one_unblock_the_other(fake_cancellation, report_error)
# The original cancellation manager of the queue has been cancelled.
self.assertTrue(initial_cm.is_cancelled)
# At this moment, there is zero inflight, nothing in queue.
self.assertTrue(closure_queue._queue.empty())
self.assertEqual(closure_queue._inflight_closure_count, 0)
self.assertIsNone(closure_queue._error)
# This asserts that closure1 has errored.
with self.assertRaisesRegex(ValueError, 'Some error.'):
closure1.output_remote_value.fetch()
# The following asserts that closure3 should have been cancelled.
if not call_wait:
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure3.output_remote_value.fetch()
# Closure2 was an inflight closure when it got cancelled.
self.assertEqual(closure2.output_remote_value._status,
coordinator_lib._RemoteValueStatus.READY)
with self.assertRaisesRegex(ValueError, 'Fake cancellation error.'):
closure2.output_remote_value.fetch()
# This asserts that the queue has a clear state.
self.testBasic()
def testWaitRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=True)
def testPutRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=False)
def testStateIsRestoredAfterJoinIsCalled(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure_queue.mark_failed(ValueError('test error'))
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
# Its error should have been cleared.
self.assertIsNone(closure_queue._error)
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertIsNone(closure_queue._error)
def testThreadSafety(self):
thread_count = 10
queue = coordinator_lib._CoordinatedClosureQueue()
# Each thread performs 20 queue actions: 10 are `put_back` and 10 are
# `mark_finished`.
action_count = 20
def func():
for i in range(action_count):
closure = queue.get()
if i % 2 == 0:
queue.put_back(closure)
else:
queue.mark_finished()
threads = [threading.Thread(target=func) for i in range(thread_count)]
for t in threads:
t.start()
for _ in range(thread_count * action_count // 2):
queue.put(self._create_closure(queue._cancellation_mgr))
queue.wait()
self.assertTrue(queue.done())
class ErrorReportingThread(threading.Thread):
error = None
def __init__(self, *args, **kwargs):
assert 'target' in kwargs
target = kwargs['target']
@functools.wraps(target)
def wrapped_target(*args, **kwargs):
try:
return target(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
ErrorReportingThread.error = e
kwargs['target'] = wrapped_target
super(ErrorReportingThread, self).__init__(*args, **kwargs)
class TestCaseWithErrorReportingThread(test.TestCase):
@classmethod
def setUpClass(cls):
cls._threading_thread = threading.Thread
threading.Thread = ErrorReportingThread
super(TestCaseWithErrorReportingThread, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestCaseWithErrorReportingThread, cls).tearDownClass()
threading.Thread = cls._threading_thread
def setUp(self):
ErrorReportingThread.error = None
super(TestCaseWithErrorReportingThread, self).setUp()
def tearDown(self):
super(TestCaseWithErrorReportingThread, self).tearDown()
if ErrorReportingThread.error:
raise ErrorReportingThread.error # pylint: disable=raising-bad-type
def make_coordinator(num_workers, num_ps):
# TODO(rchao): Test the internal rpc_layer version.
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer='grpc')
cluster_def['chief'] = [
'localhost:%d' % multi_worker_test_base.pick_unused_port()
]
cluster_resolver = SimpleClusterResolver(
ClusterSpec(cluster_def), rpc_layer='grpc')
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
cluster_resolver)
return coordinator_lib.ClusterCoordinator(strategy)
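# Illustrative sketch (not part of the original test file): the round trip the
# tests below exercise is to wrap work in a tf.function, hand it to
# `ClusterCoordinator.schedule`, and block on the result with `fetch`. This
# helper is hypothetical and is not called by any test.
def _example_schedule_and_fetch(coordinator):
  @def_function.function
  def fn():
    return constant_op.constant(2) + constant_op.constant(3)
  remote_value = coordinator.schedule(fn)  # runs asynchronously on a worker
  return coordinator.fetch(remote_value)  # blocks until the result (5) is ready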
class ClusterCoordinatorTest(TestCaseWithErrorReportingThread):
@classmethod
def setUpClass(cls):
super(ClusterCoordinatorTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=5, num_ps=2)
cls.strategy = cls.coordinator.strategy
def testFnReturnNestedValues(self):
x = constant_op.constant(1)
@def_function.function
def f():
return x + 1, (x + 2, x + 3), [x + 4], {'v': x}
got = self.coordinator.schedule(f)
want = 2, (3, 4), [5], {'v': 1}
self.assertEqual(got.fetch(), want)
self.assertEqual(self.coordinator.fetch(got), want)
def testFetchingRemoteValueStructure(self):
x = constant_op.constant(1)
@def_function.function
def f():
return x + 1, (x + 2, x + 3), [x + 4], {'v': x}
want = 2, (3, 4), [5], {'v': 1}
remote_value_list = [self.coordinator.schedule(f) for _ in range(5)]
self.assertAllEqual(
self.coordinator.fetch(remote_value_list), [want for _ in range(5)])
def testInputFunction(self):
def input_fn():
return dataset_ops.DatasetV2.range(1, 2)
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int64)
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
return x
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = self.coordinator.fetch(result)
self.assertEqual(result, (1,))
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = self.coordinator.fetch(result)
self.assertEqual(result, (1,))
self.assertAlmostEqual(v.read_value(), 2, delta=1e-6)
def testAsyncScheduleAndJoin(self):
if test_util.is_xla_enabled():
self.skipTest('Assign_add is not deterministic across threads in XLA')
def input_fn():
return dataset_ops.DatasetV2.from_tensor_slices([2] * 10)
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.coordinator.join()
self.assertEqual(v.read_value().numpy(), 0)
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
self.coordinator.join()
# With 5 additions it should be 2*5 = 10.
self.assertEqual(v.read_value().numpy(), 10)
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.coordinator.join()
self.coordinator.join()
self.coordinator.join()
self.assertTrue(self.coordinator.done())
# Likewise, it's now 20.
self.assertEqual(v.read_value().numpy(), 20.)
def testInputFunctionWithMap(self):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
return dataset_ops.DatasetV2.range(0, 10).map(map_fn)
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
self.assertEqual(result.fetch(), (10,))
self.assertEqual(self._map_fn_tracing_count, 1)
def testInputFunctionCreateVariables(self):
def input_fn():
v = variables.Variable(initial_value=0.0)
return v.read_value()
with self.assertRaises(ValueError):
self.coordinator.create_per_worker_dataset(input_fn)
def testDatasetsShuffledDifferently(self):
# This test requires at least two workers in the cluster.
self.assertGreaterEqual(len(self.coordinator._cluster.workers), 2)
random_seed.set_random_seed(None)
def input_fn():
dataset = dataset_ops.DatasetV2.range(0, 100).shuffle(100).batch(1)
return self.strategy.experimental_distribute_dataset(dataset)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
distributed_iterator = iter(distributed_dataset)
# Get elements from the first two iterators.
iterator_1 = distributed_iterator._values[0]
iterator_1._rebuild_on(self.coordinator._cluster.workers[0])
iterator_1 = iterator_1.fetch()
elements_in_iterator_1 = [
self.strategy.experimental_local_results(e)
for e in iterator_1
]
iterator_2 = distributed_iterator._values[1]
iterator_2._rebuild_on(self.coordinator._cluster.workers[1])
iterator_2 = iterator_2.fetch()
elements_in_iterator_2 = [
self.strategy.experimental_local_results(e)
for e in iterator_2
]
self.assertNotAllEqual(elements_in_iterator_1, elements_in_iterator_2)
def testPerWorkerValue(self):
self.skipTest('b/168569314')
var_shape = tuple()
var_dtype = dtypes.float32
var_name = 'var'
def create_var():
var = variables.Variable(
initial_value=0.0, dtype=var_dtype, name=var_name)
self.assertIn('worker', var.device)
return var
worker_local_var = self.coordinator._create_per_worker_resources(create_var)
# The following is a workaround to allow `worker_local_var` to be passed in
# as args to the `coordinator.schedule` method which requires tensor specs
# to trace tf.function but _create_worker_resources' return values don't
# have tensor specs. We can get rid of this workaround once
# _create_worker_resources is able to infer the tensor spec of the return
# value of the function passed in. See b/154675763.
for var in worker_local_var._values:
var._type_spec = tensor_spec.TensorSpec(var_shape, var_dtype, var_name)
def worker_fn(var):
var.assign_add(1.0)
for _ in range(10):
# Which slice of `worker_local_var` will be used will depend on which
# worker the `worker_fn` gets scheduled on.
self.coordinator.schedule(worker_fn, args=(worker_local_var,))
self.coordinator.join()
var_sum = sum(self.coordinator.fetch(worker_local_var._values))
self.assertEqual(var_sum, 10.0)
def testDisallowRemoteValueAsInput(self):
@def_function.function
def func_0():
return 1.0
@def_function.function
def func_1(x):
return x + 1.0
remote_v = self.coordinator.schedule(func_0)
with self.assertRaises(ValueError):
self.coordinator.schedule(func_1, args=(remote_v,))
def testPythonFunctionNotAllowedToSchedule(self):
def func(a):
return array_ops.identity(a)
with self.assertRaisesRegexp(
TypeError,
'`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` '
'only accepts a `tf.function` or a concrete function.'):
self.coordinator.schedule(func, args=(1,))
def testDatasetPartiallyCreatedOnCoordinator(self):
dataset = dataset_ops.DatasetV2.range(1, 10)
@def_function.function
def input_fn():
return dataset.shuffle(9)
@def_function.function
def worker_fn(iterator):
x = next(iterator)
return x
per_worker_dataset = self.coordinator.create_per_worker_dataset(input_fn)
self.coordinator.schedule(worker_fn, args=(iter(per_worker_dataset),))
with self.assertRaisesRegexp(
coordinator_lib.InputError,
'error message is Failed copying input tensor from'):
self.coordinator.join()
class LimitedClosureQueueSizeBasicTest(ClusterCoordinatorTest):
"""Test basic functionality works with explicit maximum closure queue size.
Execute the same set of test cases as in `ClusterCoordinatorTest`, with an
explicit size limit for the closure queue. Note that even when the queue size
is set to infinite, there is still a maximum practical size (depending on the
host memory limit) that might cause the queue.put operations to block when
scheduling a large number of closures on a big cluster. These tests make sure
that the coordinator does not run into deadlocks in such a scenario.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueSizeBasicTest, cls).setUpClass()
coordinator_lib._CLOSURE_QUEUE_MAX_SIZE = 2
cls.coordinator = make_coordinator(num_workers=5, num_ps=2)
cls.strategy = cls.coordinator.strategy
class ScheduleStartDelayTest(ClusterCoordinatorTest):
"""Test basic functionality works with worker scheduling delay.
This is basically to make sure that setting the environment variables
`TF_COORDINATOR_SCHEDULE_START_DELAY` and
`TF_COORDINATOR_SCHEDULE_START_DELAY_MAX` will not cause any failure.
"""
@classmethod
def setUpClass(cls):
super(ScheduleStartDelayTest, cls).setUpClass()
os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY'] = '2'
os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY_MAX'] = '4'
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
@classmethod
def tearDownClass(cls):
del os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY']
del os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY_MAX']
super(ScheduleStartDelayTest, cls).tearDownClass()
class ErrorReportingTest(TestCaseWithErrorReportingThread):
@classmethod
def setUpClass(cls):
super(ErrorReportingTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
with cls.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
@def_function.function
def _normal_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
self.iteration.assign_add(1.0)
return math_ops.reduce_mean(math_ops.matmul(x, y))
@def_function.function
def _error_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
check_ops.assert_non_positive_v2(math_ops.reduce_sum(math_ops.matmul(x, y)))
self.iteration.assign_add(1.0)
return self.iteration
@def_function.function
def _long_function(self):
x = random_ops.random_uniform((1000, 1000))
for _ in math_ops.range(10000):
a = random_ops.random_uniform((1000, 1000))
b = random_ops.random_uniform((1000, 1000))
x += math_ops.matmul(a, b)
return x
def testJoinRaiseError(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testScheduleRaiseError(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.coordinator.schedule(self._normal_function)
def testScheduleRaiseErrorWithMultipleFailure(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.coordinator.schedule(self._error_function)
self.coordinator.join()
def testErrorWillbeCleared(self):
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testRemoteValueReturnError(self):
result = self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
result.fetch()
# Clear the error.
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testInputError(self):
worker_local_val = self.coordinator._create_per_worker_resources(
self._error_function)
@def_function.function
def func(x):
return x + 1
result = self.coordinator.schedule(func, args=(worker_local_val,))
with self.assertRaises(coordinator_lib.InputError):
self.coordinator.join()
with self.assertRaises(coordinator_lib.InputError):
result.fetch()
def testCancellation(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
long_function = self.coordinator.schedule(self._long_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
with self.assertRaises(errors.CancelledError):
long_function.fetch()
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.join()
class LimitedClosureQueueErrorTest(ErrorReportingTest):
"""Test error reporting works with explicit maximum closure queue size.
Execute the same set of test cases as in ErrorReportingTest, with an explicit
size limit for the closure queue.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueErrorTest, cls).setUpClass()
coordinator_lib._CLOSURE_QUEUE_MAX_SIZE = 2
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
with cls.coordinator.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
class StrategyIntegrationTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(StrategyIntegrationTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=1, num_ps=1)
cls.strategy = cls.coordinator.strategy
def testRunNotUsedWithClusterCoordinatorSchedule(self):
@def_function.function
def input_fn():
return dataset_ops.DatasetV2.range(1, 3)
with self.strategy.scope():
v = variables.Variable(initial_value=1, dtype=dtypes.int64)
def replica_fn(input_tensor):
return input_tensor + v, input_tensor - v
@def_function.function
def worker_fn(iterator):
return self.strategy.run(replica_fn, args=(next(iterator),))
per_worker_dataset = self.coordinator.create_per_worker_dataset(input_fn)
@contextlib.contextmanager
def _assert_logs_usage_warning():
with self.assertLogs(level='WARNING') as logs:
yield
self.assertIn(
'It is detected that a function used with '
'`tf.distribute.experimental.ParameterServerStrategy` '
'is executed locally on the coordinator. This is inefficient but may '
'be valid for one-off tasks such as inferring output signature. '
'To properly distribute functions to run on workers, `run` or '
'`reduce` should be used within a function passed to `'
'tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`'
'.',
logs.output[0])
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
# A proper `schedule` should succeed.
rv = self.coordinator.schedule(worker_fn, args=(iter(per_worker_dataset),))
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` again should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
all_results = [(2, 0)] * self.strategy.num_replicas_in_sync
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append(all_results[i])
self.assertAllEqual(
tuple(expected_result),
self.strategy.experimental_local_results(rv.fetch()))
def testBasicVariableAssignment(self):
self.strategy.extended._variable_count = 0
with self.strategy.scope():
v1 = variables.Variable(initial_value=0.0)
v2 = variables.Variable(initial_value=1.0)
self.assertEqual(self.strategy.extended._variable_count, 2)
@def_function.function
def worker_fn():
v1.assign_add(0.1)
v2.assign_sub(0.2)
return v1.read_value() / v2.read_value()
results = self.coordinator.schedule(worker_fn)
logging.info('Results of experimental_run_v2: %f',
self.coordinator.fetch(results))
self.assertAlmostEqual(v1.read_value().numpy(), 0.1, delta=1e-6)
self.assertAlmostEqual(v2.read_value().numpy(), 0.8, delta=1e-6)
def testRunAndReduce(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribution_strategy_context.in_cross_replica_context())
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
def testRunAndReduceWithAssignAdd(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
v1 = variables.Variable(
initial_value=0.,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribution_strategy_context.in_cross_replica_context())
v1.assign_add(input_tensor)
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
self.assertEqual(v1, 6.)
def testVariableAggregation(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(
initial_value=1.,
aggregation=variable_scope.VariableAggregation.SUM)
@def_function.function
def worker_fn():
def replica_fn():
value = math_ops.cast(
distribution_strategy_context.get_replica_context()
.replica_id_in_sync_group + 1, v.dtype)
v.assign(value)
self.strategy.run(replica_fn)
self.coordinator.schedule(worker_fn)
self.coordinator.join()
expected_result = 0.
for i in range(self.strategy.num_replicas_in_sync):
expected_result = expected_result + i + 1
self.assertEqual(v, expected_result)
def testDistributeDataset(self):
def per_worker_dataset_fn():
dataset = dataset_ops.DatasetV2.range(1, 11).batch(4)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return self.strategy.experimental_local_results(next(iterator))
distributed_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = result.fetch()
expected_result = array_ops.split(
math_ops.range(1., 5.),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(result, (expected_result))
def testDistributeDatasetsFromFunction(self):
def per_worker_dataset_fn():
def input_worker_device_fn(input_context):
self.assertIsNotNone(input_context)
return dataset_ops.DatasetV2.range(1, 11).batch(1)
return self.strategy.distribute_datasets_from_function(
input_worker_device_fn)
@def_function.function
def worker_fn(iterator):
result = self.strategy.experimental_local_results(next(iterator))
return result
distributed_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = result.fetch()
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append([1 + i])
self.assertAllEqual(result, expected_result)
def testAsyncScheduleWithDistributedDataset(self):
def input_fn():
dataset = dataset_ops.DatasetV2.from_tensor_slices([2.]).repeat().batch(
self.strategy.num_replicas_in_sync)
return self.strategy.experimental_distribute_dataset(dataset)
with self.strategy.scope():
v = variables.Variable(initial_value=[0], dtype=dtypes.float32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
# Reduce to convert PerReplica values to single value
reduced_value = self.strategy.reduce('MEAN', x, axis=None)
v.assign_add(reduced_value)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.coordinator.join()
self.assertAllEqual(v.read_value(), (0,))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
self.coordinator.join()
# With 5 additions it should be 2*5 = 10.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[10]]))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.coordinator.join()
self.coordinator.join()
self.coordinator.join()
self.assertTrue(self.coordinator.done())
# Likewise, it's now 20.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[20]]))
def testInputFunctionWithMapWithDistributedDataset(self):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
dataset = dataset_ops.DatasetV2.range(0, 10).batch(
self.strategy.num_replicas_in_sync).map(map_fn)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
expected_result = array_ops.split(
math_ops.range(10., 10. + self.strategy.num_replicas_in_sync),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(
self.strategy.experimental_local_results(result.fetch()),
tuple(expected_result))
self.assertEqual(self._map_fn_tracing_count, 1)
def testCallingDistributeDatasetOutside(self):
with self.assertRaises(ValueError):
dataset = dataset_ops.DatasetV2.range(1, 2).batch(10)
self.strategy.experimental_distribute_dataset(dataset)
with self.assertRaises(ValueError):
self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.range(1, 2).batch(2))
def testPerWorkerDistributeDatasetsElementSpec(self):
def per_worker_dataset_fn():
return self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.from_tensor_slices([1, 2]))
dataset = dataset_ops.DatasetV2.from_tensor_slices([1, 2])
per_worker_distribute_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
self.assertAllEqual(
# Converts to PerReplicaSpec when num_replicas_in_sync is > 1
input_lib._create_distributed_tensor_spec(self.strategy,
dataset.element_spec),
per_worker_distribute_dataset.element_spec)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
vnhuobi.py
|
# encoding: utf-8
import urllib
import hashlib
import json
import requests
from time import time, sleep
from Queue import Queue, Empty
from threading import Thread
# Constant definitions
COINTYPE_BTC = 1
COINTYPE_LTC = 2
ACCOUNTTYPE_CNY = 1
ACCOUNTTYPE_USD = 2
LOANTYPE_CNY = 1
LOANTYPE_BTC = 2
LOANTYPE_LTC = 3
LOANTYPE_USD = 4
MARKETTYPE_CNY = 'cny'
MARKETTYPE_USD = 'usd'
SYMBOL_ETHBTC = 'BTC_CNY'
SYMBOL_LTCBTC = 'LTC_CNY'
SYMBOL_LTCETH = 'BTC_USD'
PERIOD_1MIN = '001'
PERIOD_5MIN = '005'
PERIOD_15MIN = '015'
PERIOD_30MIN = '030'
PERIOD_60MIN = '060'
PERIOD_DAILY = '100'
PERIOD_WEEKLY = '200'
PERIOD_MONTHLY = '300'
PERIOD_ANNUALLY = '400'
# API endpoint definitions
HUOBI_TRADE_API = 'https://api.huobi.pro/v1'
# Function codes
FUNCTIONCODE_GETACCOUNTINFO = 'get_account_info'
FUNCTIONCODE_GETORDERS = 'get_orders'
FUNCTIONCODE_ORDERINFO = 'order_info'
FUNCTIONCODE_BUY = 'buy'
FUNCTIONCODE_SELL = 'sell'
FUNCTIONCODE_BUYMARKET = 'buy_market'
FUNCTIONCODE_SELLMARKET = 'sell_market'
FUNCTIONCODE_CANCELORDER = 'cancel_order'
FUNCTIONCODE_GETNEWDEALORDERS = 'get_new_deal_orders'
FUNCTIONCODE_GETORDERIDBYTRADEID = 'get_order_id_by_trade_id'
FUNCTIONCODE_WITHDRAWCOIN = 'withdraw_coin'
FUNCTIONCODE_CANCELWITHDRAWCOIN = 'cancel_withdraw_coin'
FUNCTIONCODE_GETWITHDRAWCOINRESULT = 'get_withdraw_coin_result'
FUNCTIONCODE_TRANSFER = 'transfer'
FUNCTIONCODE_LOAN = 'loan'
FUNCTIONCODE_REPAYMENT = 'repayment'
FUNCTIONCODE_GETLOANAVAILABLE = 'get_loan_available'
FUNCTIONCODE_GETLOANS = 'get_loans'
#----------------------------------------------------------------------
def signature(params):
"""生成签名"""
params = sorted(params.iteritems(), key=lambda d:d[0], reverse=False)
message = urllib.urlencode(params)
m = hashlib.md5()
m.update(message)
m.digest()
sig=m.hexdigest()
return sig
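#----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the legacy Huobi REST
# API is signed by MD5-hashing the url-encoded, alphabetically sorted request
# parameters. The keys below are placeholders, not real credentials, and this
# helper is never called by the module itself.
def _signature_example():
    """Show how a request dict would be signed before being POSTed."""
    params = {
        'method': FUNCTIONCODE_GETACCOUNTINFO,
        'access_key': 'YOUR_ACCESS_KEY',    # placeholder
        'secret_key': 'YOUR_SECRET_KEY',    # placeholder, removed after signing
        'created': 1500000000
    }
    params['sign'] = signature(params)      # hex MD5 digest of the sorted params
    del params['secret_key']
    return params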
########################################################################
class TradeApi(object):
"""交易接口"""
DEBUG = True
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.accessKey = ''
self.secretKey = ''
self.active = False # API running state
self.reqID = 0 # request sequence number
self.reqQueue = Queue() # request queue
self.reqThread = Thread(target=self.processQueue) # request processing thread
#----------------------------------------------------------------------
def processRequest(self, req):
"""处理请求"""
# Read the method and parameters
method = req['method']
params = req['params']
optional = req['optional']
# Add the required fields to the parameters
params['created'] = long(time())
params['access_key'] = self.accessKey
params['secret_key'] = self.secretKey
params['method'] = method
# Add the signature
sign = signature(params)
params['sign'] = sign
del params['secret_key']
# Add the optional parameters
if optional:
params.update(optional)
# Send the request
payload = urllib.urlencode(params)
r = requests.post(HUOBI_TRADE_API, params=payload)
if r.status_code == 200:
data = r.json()
return data
else:
return None
#----------------------------------------------------------------------
def processQueue(self):
"""处理请求队列中的请求"""
while self.active:
try:
req = self.reqQueue.get(block=True, timeout=1) # block for at most one second waiting for a request
callback = req['callback']
reqID = req['reqID']
data = self.processRequest(req)
# Request failed
if 'code' in data and 'message' in data:
error = u'Error message: %s' % data['message']
self.onError(error, req, reqID)
# Request succeeded
else:
if self.DEBUG:
print callback.__name__
callback(data, req, reqID)
except Empty:
pass
#----------------------------------------------------------------------
def sendRequest(self, method, params, callback, optional=None):
"""发送请求"""
# Increment the request id
self.reqID += 1
# Build the request dict and put it into the queue
req = {}
req['method'] = method
req['params'] = params
req['callback'] = callback
req['optional'] = optional
req['reqID'] = self.reqID
self.reqQueue.put(req)
# Return the request id
return self.reqID
####################################################
## Active (request) functions
####################################################
#----------------------------------------------------------------------
def init(self, accessKey, secretKey):
"""初始化"""
self.accessKey = accessKey
self.secretKey = secretKey
self.active = True
self.reqThread.start()
#----------------------------------------------------------------------
def exit(self):
"""退出"""
self.active = False
if self.reqThread.isAlive():
self.reqThread.join()
#----------------------------------------------------------------------
def getAccountInfo(self, market='cny'):
"""查询账户"""
method = FUNCTIONCODE_GETACCOUNTINFO
params = {}
callback = self.onGetAccountInfo
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getOrders(self, coinType=COINTYPE_BTC, market='cny'):
"""查询委托"""
method = FUNCTIONCODE_GETORDERS
params = {'coin_type': coinType}
callback = self.onGetOrders
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def orderInfo(self, id_, coinType=COINTYPE_BTC, market='cny'):
"""获取委托详情"""
method = FUNCTIONCODE_ORDERINFO
params = {
'coin_type': coinType,
'id': id_
}
callback = self.onOrderInfo
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def buy(self, price, amount, coinType=COINTYPE_BTC,
tradePassword='', tradeId = '', market='cny'):
"""委托买入"""
method = FUNCTIONCODE_BUY
params = {
'coin_type': coinType,
'price': price,
'amount': amount
}
callback = self.onBuy
optional = {
'trade_password': tradePassword,
'trade_id': tradeId,
'market': market
}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def sell(self, price, amount, coinType=COINTYPE_BTC,
tradePassword='', tradeId = '', market='cny'):
"""委托卖出"""
method = FUNCTIONCODE_SELL
params = {
'coin_type': coinType,
'price': price,
'amount': amount
}
callback = self.onSell
optional = {
'trade_password': tradePassword,
'trade_id': tradeId,
'market': market
}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def buyMarket(self, amount, coinType=COINTYPE_BTC,
tradePassword='', tradeId = '', market='cny'):
"""市价买入"""
method = FUNCTIONCODE_BUYMARKET
params = {
'coin_type': coinType,
'amount': amount
}
callback = self.onBuyMarket
optional = {
'trade_password': tradePassword,
'trade_id': tradeId,
'market': market
}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def sellMarket(self, amount, coinType=COINTYPE_BTC,
tradePassword='', tradeId = '', market='cny'):
"""市价卖出"""
method = FUNCTIONCODE_SELLMARKET
params = {
'coin_type': coinType,
'amount': amount
}
callback = self.onSellMarket
optional = {
'trade_password': tradePassword,
'trade_id': tradeId,
'market': market
}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def cancelOrder(self, id_, coinType=COINTYPE_BTC, market='cny'):
"""撤销委托"""
method = FUNCTIONCODE_CANCELORDER
params = {
'coin_type': coinType,
'id': id_
}
callback = self.onCancelOrder
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getNewDealOrders(self, market='cny'):
"""查询最新10条成交"""
method = FUNCTIONCODE_GETNEWDEALORDERS
params = {}
callback = self.onGetNewDealOrders
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getOrderIdByTradeId(self, tradeId, coinType=COINTYPE_BTC,
market='cny'):
"""通过成交编号查询委托编号"""
method = FUNCTIONCODE_GETORDERIDBYTRADEID
params = {
'coin_type': coinType,
'trade_id': tradeId
}
callback = self.onGetOrderIdByTradeId
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def withdrawCoin(self, withdrawAddress, withdrawAmount,
coinType=COINTYPE_BTC, tradePassword='',
market='cny', withdrawFee=0.0001):
"""提币"""
method = FUNCTIONCODE_WITHDRAWCOIN
params = {
'coin_type': coinType,
'withdraw_address': withdrawAddress,
'withdraw_amount': withdrawAmount
}
callback = self.onWithdrawCoin
optional = {
'market': market,
'withdraw_fee': withdrawFee
}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def cancelWithdrawCoin(self, id_, market='cny'):
"""取消提币"""
method = FUNCTIONCODE_CANCELWITHDRAWCOIN
params = {'withdraw_coin_id': id_}
callback = self.onCancelWithdrawCoin
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getWithdrawCoinResult(self, id_, market='cny'):
"""Query the withdrawal result."""
method = FUNCTIONCODE_GETWITHDRAWCOINRESULT
params = {'withdraw_coin_id': id_}
callback = self.onGetWithdrawCoinResult
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def transfer(self, amountFrom, amountTo, amount,
coinType=COINTYPE_BTC ):
"""账户内转账"""
method = FUNCTIONCODE_TRANSFER
params = {
'amount_from': amountFrom,
'amount_to': amountTo,
'amount': amount,
'coin_type': coinType
}
callback = self.onTransfer
optional = {}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def loan(self, amount, loan_type=LOANTYPE_CNY,
market=MARKETTYPE_CNY):
"""申请杠杆"""
method = FUNCTIONCODE_LOAN
params = {
'amount': amount,
'loan_type': loan_type
}
callback = self.onLoan
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def repayment(self, id_, amount, repayAll=0,
market=MARKETTYPE_CNY):
"""归还杠杆"""
method = FUNCTIONCODE_REPAYMENT
params = {
'loan_id': id_,
'amount': amount
}
callback = self.onRepayment
optional = {
'repay_all': repayAll,
'market': market
}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getLoanAvailable(self, market='cny'):
"""查询杠杆额度"""
method = FUNCTIONCODE_GETLOANAVAILABLE
params = {}
callback = self.onLoanAvailable
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getLoans(self, market='cny'):
"""查询杠杆列表"""
method = FUNCTIONCODE_GETLOANS
params = {}
callback = self.onGetLoans
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
####################################################
## Callback functions
####################################################
#----------------------------------------------------------------------
def onError(self, error, req, reqID):
"""错误推送"""
print error, reqID
#----------------------------------------------------------------------
def onGetAccountInfo(self, data, req, reqID):
"""查询账户回调"""
print data
#----------------------------------------------------------------------
def onGetOrders(self, data, req, reqID):
"""Open orders query callback."""
print data
#----------------------------------------------------------------------
def onOrderInfo(self, data, req, reqID):
"""委托详情回调"""
print data
#----------------------------------------------------------------------
def onBuy(self, data, req, reqID):
"""买入回调"""
print data
#----------------------------------------------------------------------
def onSell(self, data, req, reqID):
"""卖出回调"""
print data
#----------------------------------------------------------------------
def onBuyMarket(self, data, req, reqID):
"""市价买入回调"""
print data
#----------------------------------------------------------------------
def onSellMarket(self, data, req, reqID):
"""市价卖出回调"""
print data
#----------------------------------------------------------------------
def onCancelOrder(self, data, req, reqID):
"""撤单回调"""
print data
#----------------------------------------------------------------------
def onGetNewDealOrders(self, data, req, reqID):
"""查询最新成交回调"""
print data
#----------------------------------------------------------------------
def onGetOrderIdByTradeId(self, data, req, reqID):
"""通过成交编号查询委托编号回调"""
print data
#----------------------------------------------------------------------
def onWithdrawCoin(self, data, req, reqID):
"""提币回调"""
print data
#----------------------------------------------------------------------
def onCancelWithdrawCoin(self, data, req, reqID):
"""取消提币回调"""
print data
#----------------------------------------------------------------------
def onGetWithdrawCoinResult(self, data, req, reqID):
"""查询提币结果回调"""
print data
#----------------------------------------------------------------------
def onTransfer(self, data, req, reqID):
"""转账回调"""
print data
#----------------------------------------------------------------------
def onLoan(self, data, req, reqID):
"""申请杠杆回调"""
print data
#----------------------------------------------------------------------
def onRepayment(self, data, req, reqID):
"""归还杠杆回调"""
print data
#----------------------------------------------------------------------
def onLoanAvailable(self, data, req, reqID):
"""查询杠杆额度回调"""
print data
#----------------------------------------------------------------------
def onGetLoans(self, data, req, reqID):
"""查询杠杆列表"""
print data
########################################################################
class DataApi(object):
"""行情接口"""
TICK_SYMBOL_URL = {
SYMBOL_ETHBTC: 'https://api.huobi.pro/market/detail_btc_json.js',
SYMBOL_LTCBTC: 'https://api.huobi.pro/market/detail_ltc_json.js',
SYMBOL_LTCETH: 'https://api.huobi.pro/market/detail_btc_json.js'
}
QUOTE_SYMBOL_URL = {
SYMBOL_ETHBTC: 'https://api.huobi.pro/market/ticker_btc_json.js',
SYMBOL_LTCBTC: 'https://api.huobi.pro/market/ticker_ltc_json.js',
SYMBOL_LTCETH: 'https://api.huobi.pro/market/ticker_btc_json.js'
}
DEPTH_SYMBOL_URL = {
SYMBOL_ETHBTC: 'https://api.huobi.pro/market/ddepth?symbol=ethusdt&type=step1',
SYMBOL_LTCBTC: 'https://api.huobi.pro/market/depth_ltc_json.js',
SYMBOL_LTCETH: 'https://api.huobi.pro/market/depth_btc_json.js'
}
KLINE_SYMBOL_URL = {
SYMBOL_ETHBTC: 'https://api.huobi.pro/market/btc_kline_[period]_json.js',
SYMBOL_LTCBTC: 'https://api.huobi.pro/market/btc_kline_[period]_json.js',
SYMBOL_LTCETH: 'https://api.huobi.pro/market/btc_kline_[period]_json.js'
}
DEBUG = True
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.active = False
self.taskInterval = 0 # delay between polling rounds
self.taskList = [] # list of subscribed tasks
self.taskThread = Thread(target=self.run) # task processing thread
#----------------------------------------------------------------------
def init(self, interval, debug):
"""初始化"""
self.taskInterval = interval
self.DEBUG = debug
self.active = True
self.taskThread.start()
#----------------------------------------------------------------------
def exit(self):
"""退出"""
self.active = False
if self.taskThread.isAlive():
self.taskThread.join()
#----------------------------------------------------------------------
def run(self):
"""连续运行"""
while self.active:
for url, callback in self.taskList:
try:
r = requests.get(url)
if r.status_code == 200:
data = r.json()
if self.DEBUG:
print callback.__name__
callback(data)
except Exception, e:
print e
sleep(self.taskInterval)
#----------------------------------------------------------------------
def subscribeTick(self, symbol):
"""订阅实时成交数据"""
url = self.TICK_SYMBOL_URL[symbol]
task = (url, self.onTick)
self.taskList.append(task)
#----------------------------------------------------------------------
def subscribeQuote(self, symbol):
"""订阅实时报价数据"""
url = self.QUOTE_SYMBOL_URL[symbol]
task = (url, self.onQuote)
self.taskList.append(task)
#----------------------------------------------------------------------
def subscribeDepth(self, symbol, level=0):
"""订阅深度数据"""
url = self.DEPTH_SYMBOL_URL[symbol]
if level:
url = url.replace('json', str(level))
task = (url, self.onDepth)
self.taskList.append(task)
#----------------------------------------------------------------------
def onTick(self, data):
"""实时成交推送"""
print data
#----------------------------------------------------------------------
def onQuote(self, data):
"""实时报价推送"""
print data
#----------------------------------------------------------------------
def onDepth(self, data):
"""实时深度推送"""
print data
#----------------------------------------------------------------------
def getKline(self, symbol, period, length=0):
"""查询K线数据"""
url = self.KLINE_SYMBOL_URL[symbol]
url = url.replace('[period]', period)
if length:
url = url + '?length=' + str(length)
try:
r = requests.get(url)
if r.status_code == 200:
data = r.json()
return data
except Exception, e:
print e
return None
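#----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The access keys
# and polling interval are placeholders; nothing here runs on import.
def _usage_example():
    """Poll market data and send one account-info request."""
    dataApi = DataApi()
    dataApi.init(interval=2, debug=True)    # poll subscribed URLs every 2 seconds
    dataApi.subscribeTick(SYMBOL_ETHBTC)    # results arrive via DataApi.onTick
    tradeApi = TradeApi()
    tradeApi.init('YOUR_ACCESS_KEY', 'YOUR_SECRET_KEY')     # placeholders
    reqID = tradeApi.getAccountInfo()       # reply arrives via onGetAccountInfo
    sleep(10)
    tradeApi.exit()
    dataApi.exit()
    return reqID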
|
base_container.py
|
import threading
import time
import socket
import struct
import select
import utils
from xlog import getLogger
xlog = getLogger("x_tunnel")
class WriteBuffer(object):
def __init__(self, s=None):
if isinstance(s, str):
self.string_len = len(s)
self.buffer_list = [s]
else:
self.reset()
def reset(self):
self.buffer_list = []
self.string_len = 0
def __len__(self):
return self.string_len
def __add__(self, other):
self.append(other)
return self
def insert(self, s):
if isinstance(s, WriteBuffer):
self.buffer_list = s.buffer_list + self.buffer_list
self.string_len += s.string_len
elif isinstance(s, str):
self.buffer_list.insert(0, s)
self.string_len += len(s)
else:
raise Exception("WriteBuffer append not string or StringBuffer")
def append(self, s):
if isinstance(s, WriteBuffer):
self.buffer_list.extend(s.buffer_list)
self.string_len += s.string_len
elif isinstance(s, str):
self.buffer_list.append(s)
self.string_len += len(s)
else:
raise Exception("WriteBuffer append not string or StringBuffer")
def __str__(self):
return self.get_string()
def get_string(self):
return "".join(self.buffer_list)
class ReadBuffer(object):
def __init__(self, buf, begin=0, size=None):
buf_len = len(buf)
if size is None:
if begin > buf_len:
raise Exception("ReadBuffer buf_len:%d, start:%d" % (buf_len, begin))
size = buf_len - begin
elif begin + size > buf_len:
raise Exception("ReadBuffer buf_len:%d, start:%d len:%d" % (buf_len, begin, size))
self.size = size
self.buf = memoryview(buf)
self.begin = begin
def __len__(self):
return self.size
def get(self, size=None):
if size is None:
size = self.size
elif size > self.size:
raise Exception("ReadBuffer get %d but left %d" % (size, self.size))
data = self.buf[self.begin:self.begin + size]
self.begin += size
self.size -= size
return data
def get_buf(self, size=None):
if size is None:
size = self.size
elif size > self.size:
raise Exception("ReadBuffer get %d but left %d" % (size, self.size))
buf = ReadBuffer(self.buf, self.begin, size)
self.begin += size
self.size -= size
return buf
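# Hedged sketch (added, not part of the original module): WriteBuffer collects
# string fragments and only joins them in get_string(), while ReadBuffer walks
# a flat buffer with get()/get_buf() returning memoryview slices. The helper
# below is illustrative only (never called) and assumes the Python 2
# byte-string semantics this module is written for.
def _buffer_roundtrip_sketch():
    out = WriteBuffer(struct.pack("<I", 42))   # 4-byte little-endian header
    out.append("payload")                      # appended without copying yet
    wire = out.get_string()                    # flattened into one string
    rb = ReadBuffer(wire)
    header = struct.unpack("<I", rb.get(4).tobytes())[0]
    body = rb.get().tobytes()                  # remaining bytes
    return header, body                        # -> (42, "payload")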
class AckPool():
def __init__(self):
self.mutex = threading.Lock()
self.reset()
def reset(self):
# xlog.info("Ack_pool reset")
self.mutex.acquire()
self.ack_buffer = WriteBuffer()
self.mutex.release()
# xlog.info("Ack_pool reset finished")
def put(self, data):
# xlog.debug("Ack_pool put len:%d", len(data))
self.mutex.acquire()
self.ack_buffer.append(data)
self.mutex.release()
def get(self):
self.mutex.acquire()
data = self.ack_buffer
self.ack_buffer = WriteBuffer()
self.mutex.release()
# xlog.debug("Ack_pool get len:%d", len(data))
return data
def status(self):
out_string = "Ack_pool:len %d<br>\r\n" % len(self.ack_buffer)
return out_string
class WaitQueue():
def __init__(self):
self.lock = threading.Lock()
self.waiters = []
# (end_time, Lock())
self.running = True
def stop(self):
self.running = False
xlog.info("WaitQueue stop")
for end_time, lock in self.waiters:
lock.release()
self.waiters = []
xlog.info("WaitQueue stop finished")
def notify(self):
# xlog.debug("notify")
if len(self.waiters) == 0:
# xlog.debug("notify none.")
return
try:
end_time, lock = self.waiters.pop(0)
lock.release()
except:
pass
def wait(self, end_time):
with self.lock:
lock = threading.Lock()
lock.acquire()
if len(self.waiters) == 0:
self.waiters.append((end_time, lock))
else:
is_max = True
for i in range(0, len(self.waiters)):
try:
iend_time, ilock = self.waiters[i]
if iend_time > end_time:
is_max = False
break
except Exception as e:
if i >= len(self.waiters):
break
xlog.warn("get %d from size:%d fail.", i, len(self.waiters))
continue
if is_max:
self.waiters.append((end_time, lock))
else:
self.waiters.insert(i, (end_time, lock))
lock.acquire()
def status(self):
out_string = "waiters[%d]:<br>\n" % len(self.waiters)
for i in range(0, len(self.waiters)):
end_time, lock = self.waiters[i]
out_string += "%d<br>\r\n" % ((end_time - time.time()))
return out_string
class SendBuffer():
def __init__(self, max_payload):
self.mutex = threading.Lock()
self.max_payload = max_payload
self.reset()
def reset(self):
self.pool_size = 0
self.last_put_time = time.time()
with self.mutex:
self.head_sn = 1
self.tail_sn = 1
self.block_list = {}
self.last_block = WriteBuffer()
def put(self, data):
dlen = len(data)
if dlen == 0:
xlog.warn("SendBuffer put 0")
return
# xlog.debug("SendBuffer put len:%d", len(data))
self.last_put_time = time.time()
with self.mutex:
self.pool_size += dlen
self.last_block.append(data)
if len(self.last_block) > self.max_payload:
self.block_list[self.head_sn] = self.last_block
self.last_block = WriteBuffer()
self.head_sn += 1
return True
def get(self):
with self.mutex:
if self.tail_sn < self.head_sn:
data = self.block_list[self.tail_sn]
del self.block_list[self.tail_sn]
sn = self.tail_sn
self.tail_sn += 1
self.pool_size -= len(data)
# xlog.debug("send_pool get, sn:%r len:%d ", sn, len(data))
return data, sn
if len(self.last_block) > 0:
data = self.last_block
sn = self.tail_sn
self.last_block = WriteBuffer()
self.head_sn += 1
self.tail_sn += 1
self.pool_size -= len(data)
# xlog.debug("send_pool get, sn:%r len:%d ", sn, len(data))
return data, sn
#xlog.debug("Get:%s", utils.str2hex(data))
# xlog.debug("SendBuffer get wake after no data, tail:%d", self.tail_sn)
return "", 0
def status(self):
out_string = "SendBuffer:<br>\n"
out_string += " size:%d<br>\n" % self.pool_size
out_string += " last_put_time:%f<br>\n" % (time.time() - self.last_put_time)
out_string += " head_sn:%d<br>\n" % self.head_sn
out_string += " tail_sn:%d<br>\n" % self.tail_sn
out_string += "block_list:[%d]<br>\n" % len(self.block_list)
        for sn in sorted(self.block_list.keys()):
data = self.block_list[sn]
out_string += "[%d] len:%d<br>\r\n" % (sn, len(data))
return out_string
class BlockReceivePool():
def __init__(self, process_callback):
self.lock = threading.Lock()
self.process_callback = process_callback
self.reset()
def reset(self):
# xlog.info("recv_pool reset")
self.next_sn = 1
self.block_list = []
def put(self, sn, data):
self.lock.acquire()
try:
if sn < self.next_sn:
# xlog.warn("recv_pool put timeout sn:%d", sn)
return False
elif sn > self.next_sn:
# xlog.debug("recv_pool put unorder sn:%d", sn)
if sn in self.block_list:
# xlog.warn("recv_pool put sn:%d exist", sn)
return False
else:
self.block_list.append(sn)
self.process_callback(data)
return True
else:
# xlog.debug("recv_pool put sn:%d in order", sn)
self.process_callback(data)
self.next_sn = sn + 1
while sn + 1 in self.block_list:
sn += 1
# xlog.debug("recv_pool sn:%d processed", sn)
self.block_list.remove(sn)
self.next_sn = sn + 1
return True
except Exception as e:
raise Exception("recv_pool put sn:%d len:%d error:%r" % (sn, len(data), e))
finally:
self.lock.release()
def status(self):
out_string = "Block_receive_pool:<br>\r\n"
out_string += " next_sn:%d<br>\r\n" % self.next_sn
for sn in sorted(self.block_list):
out_string += "[%d] <br>\r\n" % (sn)
return out_string
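# Hedged sketch (added, not part of the original module): BlockReceivePool
# reorders blocks by serial number -- an out-of-order block is handed to the
# callback right away but its sn is remembered, so next_sn only catches up
# once the gap is filled. Illustrative only, never called.
def _receive_pool_sketch():
    collected = []
    pool = BlockReceivePool(process_callback=collected.append)
    pool.put(2, "second")   # ahead of next_sn=1: callback runs, sn 2 remembered
    pool.put(1, "first")    # fills the gap; next_sn advances past 2 to 3
    return pool.next_sn, collected   # -> (3, ["second", "first"])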
class Conn(object):
def __init__(self, session, conn_id, sock, host, port, windows_size, windows_ack, is_client, xlog):
# xlog.info("session:%s Conn:%d host:%s port:%d", session.session_id, conn_id, host, port)
self.host = host
self.port = port
self.session = session
self.conn_id = conn_id
self.sock = sock
self.windows_size = windows_size
self.windows_ack = windows_ack
self.is_client = is_client
self.cmd_queue = {}
self.cmd_notice = threading.Condition()
self.recv_notice = threading.Condition()
self.running = True
self.received_position = 0
self.remote_acked_position = 0
self.sended_position = 0
self.sended_window_position = 0
self.recv_thread = None
self.cmd_thread = None
self.xlog = xlog
self.transfered_close_to_peer = False
if sock:
self.next_cmd_seq = 1
else:
self.next_cmd_seq = 0
self.next_recv_seq = 1
def start(self, block):
if self.sock:
self.recv_thread = threading.Thread(target=self.recv_worker)
self.recv_thread.start()
else:
self.recv_thread = None
if block:
self.cmd_thread = None
self.cmd_processor()
else:
self.cmd_thread = threading.Thread(target=self.cmd_processor)
self.cmd_thread.start()
def status(self):
out_string = "Conn[%d]: %s:%d<br>\r\n" % (self.conn_id, self.host, self.port)
out_string += " received_position:%d/ Ack:%d <br>\n" % (self.received_position, self.remote_acked_position)
out_string += " sended_position:%d/ win:%d<br>\n" % (self.sended_position, self.sended_window_position)
out_string += " next_cmd_seq:%d<br>\n" % self.next_cmd_seq
out_string += " next_recv_seq:%d<br>\n" % self.next_recv_seq
out_string += " status: running:%r<br>\n" % self.running
out_string += " transfered_close_to_peer:%r<br>\n" % self.transfered_close_to_peer
out_string += " sock:%r<br>\n" % (self.sock is not None)
out_string += " cmd_queue.len:%d " % len(self.cmd_queue)
for seq in self.cmd_queue:
out_string += "[%d]," % seq
out_string += "<br>\n"
return out_string
def stop(self, reason=""):
self.stop_thread = threading.Thread(target=self.do_stop, args=(reason,))
self.stop_thread.start()
def do_stop(self, reason="unknown"):
self.xlog.debug("Conn session:%s conn:%d stop:%s", self.session.session_id, self.conn_id, reason)
self.running = False
self.cmd_notice.acquire()
self.cmd_notice.notify()
self.cmd_notice.release()
self.recv_notice.acquire()
self.recv_notice.notify()
self.recv_notice.release()
if self.recv_thread:
self.recv_thread.join()
self.recv_thread = None
if self.cmd_thread:
self.cmd_thread.join()
self.cmd_thread = None
self.cmd_queue = {}
if self.sock is not None:
self.sock.close()
self.sock = None
# xlog.debug("Conn session:%s conn:%d stopped", self.session.session_id, self.conn_id)
self.session.remove_conn(self.conn_id)
def do_connect(self, host, port):
self.xlog.info("session_id:%s create_conn %d %s:%d", self.session.session_id, self.conn_id, host, port)
connect_timeout = 30
sock = None
# start_time = time.time()
ip = ""
try:
if ':' in host:
# IPV6
ip = host
elif utils.check_ip_valid4(host):
# IPV4
ip = host
else:
# xlog.debug("getting ip of %s", host)
ip = socket.gethostbyname(host)
# xlog.debug("resolve %s to %s", host, ip)
sock = socket.socket(socket.AF_INET if ':' not in ip else socket.AF_INET6)
# set reuseaddr option to avoid 10048 socket error
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # resize socket recv buffer 8K->32K to improve browser-related application performance
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32 * 1024)
            # disable Nagle's algorithm to send the HTTP request quickly.
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
# set a short timeout to trigger timeout retry more quickly.
sock.settimeout(connect_timeout)
sock.connect((ip, port))
# record TCP connection time
# conn_time = time.time() - start_time
# xlog.debug("tcp conn %s %s time:%d", host, ip, conn_time * 1000)
return sock, True
except Exception as e:
# conn_time = int((time.time() - start_time) * 1000)
# xlog.debug("tcp conn host:%s %s:%d fail t:%d %r", host, ip, port, conn_time, e)
if sock:
sock.close()
return e, False
def put_cmd_data(self, data):
with self.cmd_notice:
seq = struct.unpack("<I", data.get(4))[0]
if seq < self.next_cmd_seq:
# xlog.warn("put_send_data %s conn:%d seq:%d next:%d",
# self.session.session_id, self.conn_id,
# seq, self.next_cmd_seq)
return
self.cmd_queue[seq] = data.get_buf()
if seq == self.next_cmd_seq:
self.cmd_notice.notify()
def get_cmd_data(self):
self.cmd_notice.acquire()
try:
while self.running:
if self.next_cmd_seq in self.cmd_queue:
payload = self.cmd_queue[self.next_cmd_seq]
del self.cmd_queue[self.next_cmd_seq]
self.next_cmd_seq += 1
#self.xlog.debug("Conn session:%s conn:%d get data len:%d ", self.session.session_id, self.conn_id, len(payload))
return payload
else:
self.cmd_notice.wait()
finally:
self.cmd_notice.release()
return False
def cmd_processor(self):
while self.running:
data = self.get_cmd_data()
if not data:
break
cmd_id = struct.unpack("<B", data.get(1))[0]
if cmd_id == 1: # data
self.send_to_sock(data)
elif cmd_id == 3: # ack:
position = struct.unpack("<Q", data.get(8))[0]
self.xlog.debug("Conn session:%s conn:%d ACK:%d", self.session.session_id, self.conn_id, position)
if position > self.remote_acked_position:
self.remote_acked_position = position
self.recv_notice.acquire()
self.recv_notice.notify()
self.recv_notice.release()
elif cmd_id == 2: # Closed
dat = data.get()
if isinstance(dat, memoryview):
dat = dat.tobytes()
self.xlog.debug("Conn session:%s conn:%d Peer Close:%s", self.session.session_id, self.conn_id, dat)
if self.is_client:
self.transfer_peer_close("finish")
self.stop("peer close")
elif cmd_id == 0: # Create connect
if self.port or len(self.host) or self.next_cmd_seq != 1 or self.sock:
raise Exception("put_send_data %s conn:%d Create but host:%s port:%d next seq:%d" % (
self.session.session_id, self.conn_id,
self.host, self.port, self.next_cmd_seq))
self.sock_type = struct.unpack("<B", data.get(1))[0]
host_len = struct.unpack("<H", data.get(2))[0]
self.host = data.get(host_len)
self.port = struct.unpack("<H", data.get(2))[0]
sock, res = self.do_connect(self.host, self.port)
if res is False:
self.xlog.debug("Conn session:%s conn:%d %s:%d Create fail", self.session.session_id, self.conn_id,
self.host, self.port)
self.transfer_peer_close("connect fail")
else:
self.xlog.info("Conn session:%s conn:%d %s:%d", self.session.session_id, self.conn_id, self.host,
self.port)
self.sock = sock
self.recv_thread = threading.Thread(target=self.recv_worker)
self.recv_thread.start()
else:
self.xlog.error("Conn session:%s conn:%d unknown cmd_id:%d",
self.session.session_id, self.conn_id, cmd_id)
raise Exception("put_send_data unknown cmd_id:%d" % cmd_id)
def send_to_sock(self, data):
sock = self.sock
if not sock:
return
payload_len = len(data)
buf = data.buf
start = data.begin
end = data.begin + payload_len
while start < end:
send_size = min(end - start, 65535)
try:
sended = sock.send(buf[start:start + send_size])
except Exception as e:
self.xlog.info("%s conn_id:%d send closed", self.session.session_id, self.conn_id)
sock.close()
self.sock = None
if self.is_client:
self.do_stop(reason="send fail.")
return
start += sended
self.sended_position += payload_len
if self.sended_position - self.sended_window_position > self.windows_ack:
self.sended_window_position = self.sended_position
self.transfer_ack(self.sended_position)
# xlog.debug("Conn:%d ack:%d", self.conn_id, self.sended_window_position)
def transfer_peer_close(self, reason=""):
with self.recv_notice:
if self.transfered_close_to_peer:
return
self.transfered_close_to_peer = True
cmd = struct.pack("<IB", self.next_recv_seq, 2)
self.session.send_conn_data(self.conn_id, cmd + reason)
self.next_recv_seq += 1
def transfer_received_data(self, data):
with self.recv_notice:
if self.transfered_close_to_peer:
return
buf = WriteBuffer(struct.pack("<IB", self.next_recv_seq, 1))
buf.append(data)
self.next_recv_seq += 1
self.received_position += len(data)
if self.received_position < 16 * 1024:
no_delay = True
else:
no_delay = False
self.session.send_conn_data(self.conn_id, buf, no_delay)
def transfer_ack(self, position):
with self.recv_notice:
if self.transfered_close_to_peer:
return
cmd_position = struct.pack("<IBQ", self.next_recv_seq, 3, position)
self.session.send_conn_data(self.conn_id, cmd_position)
self.next_recv_seq += 1
def recv_worker(self):
sock = self.sock
fdset = [sock, ]
while self.running:
self.recv_notice.acquire()
try:
if self.received_position > self.remote_acked_position + self.windows_size:
# xlog.debug("Conn session:%s conn:%d recv blocked, rcv:%d, ack:%d", self.session.session_id, self.conn_id, self.received_position, self.remote_acked_position)
self.recv_notice.wait()
continue
finally:
self.recv_notice.release()
r, w, e = select.select(fdset, [], [], 1)
if sock in r:
try:
data = sock.recv(65535)
except:
data = ""
data_len = len(data)
if data_len == 0:
# xlog.debug("Conn session:%s conn:%d recv socket closed", self.session.session_id, self.conn_id)
self.transfer_peer_close("recv closed")
sock.close()
self.sock = None
self.recv_thread = None
if self.is_client:
self.do_stop(reason="recv fail.")
return
self.transfer_received_data(data)
# xlog.debug("Conn session:%s conn:%d Recv len:%d id:%d", self.session.session_id, self.conn_id, data_len, self.recv_id)
# xlog.debug("Conn session:%s conn:%d Recv worker stopped", self.session.session_id, self.conn_id)
|
02_cheetah_es.py
|
#!/usr/bin/env python3
import gym
import roboschool
import ptan
import time
import argparse
import numpy as np
import collections
import torch
import torch.nn as nn
from torch import multiprocessing as mp
from torch import optim
from tensorboardX import SummaryWriter
NOISE_STD = 0.05
LEARNING_RATE = 0.01
PROCESSES_COUNT = 6
ITERS_PER_UPDATE = 10
MAX_ITERS = 100000
# result item from the worker to master. Fields:
# 1. random seed used to generate noise
# 2. reward obtained from the positive noise
# 3. reward obtained from the negative noise
# 4. total amount of steps done
RewardsItem = collections.namedtuple('RewardsItem', field_names=['seed', 'pos_reward', 'neg_reward', 'steps'])
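# Hedged sketch (added, not part of the original script): each worker seeds
# NumPy with `seed` right before sampling noise, so the master can rebuild the
# identical positive/negative perturbations from the seed alone instead of
# shipping noise tensors through the queue -- the same trick the training loop
# below uses via np.random.seed(reward.seed) followed by sample_noise(net).
# The helper is illustrative only and is never called.
def _rebuild_noise_from_seed(net, item):
    np.random.seed(item.seed)
    return sample_noise(net)   # (positive noise, mirrored negative noise)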
def make_env():
return gym.make("RoboschoolHalfCheetah-v1")
class Net(nn.Module):
def __init__(self, obs_size, act_size, hid_size=64):
super(Net, self).__init__()
self.mu = nn.Sequential(
nn.Linear(obs_size, hid_size),
nn.Tanh(),
nn.Linear(hid_size, hid_size),
nn.Tanh(),
nn.Linear(hid_size, act_size),
nn.Tanh(),
)
def forward(self, x):
return self.mu(x)
def evaluate(env, net, device="cpu"):
obs = env.reset()
reward = 0.0
steps = 0
while True:
obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
action_v = net(obs_v)
action = action_v.data.cpu().numpy()[0]
obs, r, done, _ = env.step(action)
reward += r
steps += 1
if done:
break
return reward, steps
def sample_noise(net, device="cpu"):
res = []
neg = []
for p in net.parameters():
noise_t = torch.FloatTensor(np.random.normal(size=p.data.size()).astype(np.float32)).to(device)
res.append(noise_t)
neg.append(-noise_t)
return res, neg
def eval_with_noise(env, net, noise, noise_std, device="cpu"):
for p, p_n in zip(net.parameters(), noise):
p.data += noise_std * p_n
r, s = evaluate(env, net, device)
for p, p_n in zip(net.parameters(), noise):
p.data -= noise_std * p_n
return r, s
def compute_ranks(x):
"""
Returns ranks in [0, len(x))
Note: This is different from scipy.stats.rankdata, which returns ranks in [1, len(x)].
"""
assert x.ndim == 1
ranks = np.empty(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks
def compute_centered_ranks(x):
y = compute_ranks(x.ravel()).reshape(x.shape).astype(np.float32)
y /= (x.size - 1)
y -= .5
return y
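# Hedged illustration (added, not part of the original script): the centred
# rank transform replaces raw rewards with values spread evenly in
# [-0.5, 0.5], which keeps the update computed in train_step() below
# insensitive to the raw reward scale. Illustrative only, never called.
def _centered_ranks_example():
    rewards = np.array([10.0, -5.0, 0.0, 3.0])
    # ranks are [3, 0, 1, 2] -> centred to roughly [0.5, -0.5, -0.167, 0.167]
    return compute_centered_ranks(rewards)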
def train_step(optimizer, net, batch_noise, batch_reward, writer, step_idx, noise_std):
weighted_noise = None
norm_reward = compute_centered_ranks(np.array(batch_reward))
for noise, reward in zip(batch_noise, norm_reward):
if weighted_noise is None:
weighted_noise = [reward * p_n for p_n in noise]
else:
for w_n, p_n in zip(weighted_noise, noise):
w_n += reward * p_n
m_updates = []
optimizer.zero_grad()
for p, p_update in zip(net.parameters(), weighted_noise):
update = p_update / (len(batch_reward) * noise_std)
p.grad = -update
m_updates.append(torch.norm(update))
writer.add_scalar("update_l2", np.mean(m_updates), step_idx)
optimizer.step()
def worker_func(worker_id, params_queue, rewards_queue, device, noise_std):
env = make_env()
net = Net(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
net.eval()
while True:
params = params_queue.get()
if params is None:
break
net.load_state_dict(params)
for _ in range(ITERS_PER_UPDATE):
seed = np.random.randint(low=0, high=65535)
np.random.seed(seed)
noise, neg_noise = sample_noise(net, device=device)
pos_reward, pos_steps = eval_with_noise(env, net, noise, noise_std, device=device)
neg_reward, neg_steps = eval_with_noise(env, net, neg_noise, noise_std, device=device)
rewards_queue.put(RewardsItem(seed=seed, pos_reward=pos_reward,
neg_reward=neg_reward, steps=pos_steps+neg_steps))
pass
if __name__ == "__main__":
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action='store_true', help="Enable CUDA mode")
parser.add_argument("--lr", type=float, default=LEARNING_RATE)
parser.add_argument("--noise-std", type=float, default=NOISE_STD)
parser.add_argument("--iters", type=int, default=MAX_ITERS)
args = parser.parse_args()
device = "cuda" if args.cuda else "cpu"
writer = SummaryWriter(comment="-cheetah-es_lr=%.3e_sigma=%.3e" % (args.lr, args.noise_std))
env = make_env()
net = Net(env.observation_space.shape[0], env.action_space.shape[0])
print(net)
params_queues = [mp.Queue(maxsize=1) for _ in range(PROCESSES_COUNT)]
rewards_queue = mp.Queue(maxsize=ITERS_PER_UPDATE)
workers = []
for idx, params_queue in enumerate(params_queues):
proc = mp.Process(target=worker_func, args=(idx, params_queue, rewards_queue, device, args.noise_std))
proc.start()
workers.append(proc)
print("All started!")
optimizer = optim.Adam(net.parameters(), lr=args.lr)
for step_idx in range(args.iters):
# broadcasting network params
params = net.state_dict()
for q in params_queues:
q.put(params)
# waiting for results
t_start = time.time()
batch_noise = []
batch_reward = []
results = 0
batch_steps = 0
batch_steps_data = []
while True:
while not rewards_queue.empty():
reward = rewards_queue.get_nowait()
np.random.seed(reward.seed)
noise, neg_noise = sample_noise(net)
batch_noise.append(noise)
batch_reward.append(reward.pos_reward)
batch_noise.append(neg_noise)
batch_reward.append(reward.neg_reward)
results += 1
batch_steps += reward.steps
batch_steps_data.append(reward.steps)
if results == PROCESSES_COUNT * ITERS_PER_UPDATE:
break
time.sleep(0.01)
dt_data = time.time() - t_start
m_reward = np.mean(batch_reward)
train_step(optimizer, net, batch_noise, batch_reward, writer, step_idx, args.noise_std)
writer.add_scalar("reward_mean", m_reward, step_idx)
writer.add_scalar("reward_std", np.std(batch_reward), step_idx)
writer.add_scalar("reward_max", np.max(batch_reward), step_idx)
writer.add_scalar("batch_episodes", len(batch_reward), step_idx)
writer.add_scalar("batch_steps", batch_steps, step_idx)
speed = batch_steps / (time.time() - t_start)
writer.add_scalar("speed", speed, step_idx)
dt_step = time.time() - t_start - dt_data
print("%d: reward=%.2f, speed=%.2f f/s, data_gather=%.3f, train=%.3f, steps_mean=%.2f, min=%.2f, max=%.2f, steps_std=%.2f" % (
step_idx, m_reward, speed, dt_data, dt_step, np.mean(batch_steps_data),
np.min(batch_steps_data), np.max(batch_steps_data), np.std(batch_steps_data)))
for worker, p_queue in zip(workers, params_queues):
p_queue.put(None)
worker.join()
|
zuul_swift_upload.py
|
#!/usr/bin/env python3
#
# Copyright 2014 Rackspace Australia
# Copyright 2018 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Utility to upload files to swift
"""
import argparse
import logging
import mimetypes
import os
import queue as queuelib
import stat
import sys
import tempfile
import threading
import time
import zlib
import collections
import openstack
import requests
import requests.exceptions
import requestsexceptions
from ansible.module_utils.basic import AnsibleModule
try:
# Python 3.3+
from collections.abc import Sequence
except ImportError:
from collections import Sequence
mimetypes.init()
mimetypes.add_type('text/plain', '.yaml')
MAX_UPLOAD_THREADS = 24
POST_ATTEMPTS = 3
# Map mime types to apache icons
APACHE_MIME_ICON_MAP = {
'_default': 'unknown.png',
'application/gzip': 'compressed.png',
'application/directory': 'folder.png',
'text/html': 'text.png',
'text/plain': 'text.png',
}
# Map mime types to apache icons
APACHE_FILE_ICON_MAP = {
'..': 'back.png',
}
# These icon files are from the Apache project and are in the public
# domain.
ICON_IMAGES = {
'back.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
'///M//+ZmZlmZmYzMzMAAACei5rnAAAAAnRSTlP/AOW3MEoAAABWSURB'
'VHjabdBBCgAhDEPRRpv7X3kwEMsQ//IRRC08urjRHbha5VLFUsVSxVI9'
'lmDh5hMpHD6n0EgoiZG0DNINpnWlcVXaRix76e1/8dddcL6nG0Ri9gHj'
'tgSXKYeLBgAAAABJRU5ErkJggg==',
'compressed.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAADAFBM'
'VEX//////8z//5n//2b//zP//wD/zP//zMz/zJn/zGb/zDP/zAD/'
'mf//mcz/mZn/mWb/mTP/mQD/Zv//Zsz/Zpn/Zmb/ZjP/ZgD/M///'
'M8z/M5n/M2b/MzP/MwD/AP//AMz/AJn/AGb/ADP/AADM///M/8zM'
'/5nM/2bM/zPM/wDMzP/MzMzMzJnMzGbMzDPMzADMmf/MmczMmZnM'
'mWbMmTPMmQDMZv/MZszMZpnMZmbMZjPMZgDMM//MM8zMM5nMM2bM'
'MzPMMwDMAP/MAMzMAJnMAGbMADPMAACZ//+Z/8yZ/5mZ/2aZ/zOZ'
'/wCZzP+ZzMyZzJmZzGaZzDOZzACZmf+ZmcyZmZmZmWaZmTOZmQCZ'
'Zv+ZZsyZZpmZZmaZZjOZZgCZM/+ZM8yZM5mZM2aZMzOZMwCZAP+Z'
'AMyZAJmZAGaZADOZAABm//9m/8xm/5lm/2Zm/zNm/wBmzP9mzMxm'
'zJlmzGZmzDNmzABmmf9mmcxmmZlmmWZmmTNmmQBmZv9mZsxmZplm'
'ZmZmZjNmZgBmM/9mM8xmM5lmM2ZmMzNmMwBmAP9mAMxmAJlmAGZm'
'ADNmAAAz//8z/8wz/5kz/2Yz/zMz/wAzzP8zzMwzzJkzzGYzzDMz'
'zAAzmf8zmcwzmZkzmWYzmTMzmQAzZv8zZswzZpkzZmYzZjMzZgAz'
'M/8zM8wzM5kzM2YzMzMzMwAzAP8zAMwzAJkzAGYzADMzAAAA//8A'
'/8wA/5kA/2YA/zMA/wAAzP8AzMwAzJkAzGYAzDMAzAAAmf8AmcwA'
'mZkAmWYAmTMAmQAAZv8AZswAZpkAZmYAZjMAZgAAM/8AM8wAM5kA'
'M2YAMzMAMwAAAP8AAMwAAJkAAGYAADPuAADdAAC7AACqAACIAAB3'
'AABVAABEAAAiAAARAAAA7gAA3QAAuwAAqgAAiAAAdwAAVQAARAAA'
'IgAAEQAAAO4AAN0AALsAAKoAAIgAAHcAAFUAAEQAACIAABHu7u7d'
'3d27u7uqqqqIiIh3d3dVVVVEREQiIiIREREAAAD7CIKZAAAAJXRS'
'TlP///////////////////////////////////////////////8A'
'P89CTwAAAGtJREFUeNp9z9ENgDAIhOEOco+dybVuEXasFMRDY/x5'
'+xJCO6Znu6kSx7BhXyjtKBWWNlwW88Loid7hFRKBXiIYCMfMEYUQ'
'QohC3CjFA5nIjqx1CqlDLGR/EhM5O06yvin0ftGOyIS7lV14AsQN'
'aR7rMEBYAAAAAElFTkSuQmCC',
'folder.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
'////zJnM//+ZZjMzMzMAAADCEvqoAAAAA3RSTlP//wDXyg1BAAAASElE'
'QVR42s3KQQ6AQAhDUaXt/a/sQDrRJu7c+NmQB0e99B3lnqjT6cYx6zSI'
'bV40n3D7psYMoBoz4w8/EdNYQsbGEjNxYSljXTEsA9O1pLTvAAAAAElF'
'TkSuQmCC',
'text.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
'///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABISURBVHja'
'tcrRCgAgCENRbf7/N7dKomGvngjhMsPLD4NdMPwia438NRIyxsaL/XQZ'
'hyxpkC6zyjLXGVXnkhqWJWIIrOgeinECLlUCjBCqNQoAAAAASUVORK5C'
'YII=',
'unknown.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
'///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABYSURBVHja'
'ncvRDoAgDEPRruX/v1kmNHPBxMTLyzgD6FmsILg56g2hQnJkOco4yZhq'
'tN5nYd5Zq0LsHblwxwP9GTCWsaGtoelANKzOlz/RfaLYUmLE6E28ALlN'
'AupSdoFsAAAAAElFTkSuQmCC'}
def get_mime_icon(mime, filename=''):
icon = (APACHE_FILE_ICON_MAP.get(filename) or
APACHE_MIME_ICON_MAP.get(mime) or
APACHE_MIME_ICON_MAP['_default'])
return "data:image/png;base64,%s" % ICON_IMAGES[icon]
def sizeof_fmt(num, suffix='B'):
# From http://stackoverflow.com/questions/1094841/
# reusable-library-to-get-human-readable-version-of-file-size
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
class FileDetail():
"""
Used to generate indexes with links or as the file path
to push to swift.
"""
def __init__(self, full_path, relative_path, filename=None):
"""
Args:
full_path (str): The absolute path to the file on disk.
relative_path (str): The relative path from the artifacts source
used for links.
filename (str): An optional alternate filename in links.
"""
self.full_path = full_path
if filename is None:
self.filename = os.path.basename(full_path)
else:
self.filename = filename
self.relative_path = relative_path
if self.full_path and os.path.isfile(self.full_path):
mime_guess, encoding = mimetypes.guess_type(self.full_path)
self.mimetype = mime_guess if mime_guess else 'text/plain'
self.encoding = encoding
self.folder = False
else:
self.mimetype = 'application/directory'
self.encoding = None
self.folder = True
if self.full_path:
st = os.stat(self.full_path)
self.last_modified = time.gmtime(st[stat.ST_MTIME])
self.size = st[stat.ST_SIZE]
else:
self.last_modified = time.gmtime(0)
self.size = 0
def __repr__(self):
t = 'Folder' if self.folder else 'File'
return '<%s %s>' % (t, self.relative_path)
class FileList(Sequence):
def __init__(self):
self.file_list = []
self.file_list.append(FileDetail(None, '', ''))
def __getitem__(self, item):
return self.file_list.__getitem__(item)
def __len__(self):
return self.file_list.__len__()
@staticmethod
def _path_in_tree(root, path):
full_path = os.path.realpath(os.path.abspath(
os.path.expanduser(path)))
if not full_path.startswith(root):
logging.debug("Skipping path outside root: %s" % (path,))
return False
return True
def add(self, file_path):
"""
Generate a list of files to upload to swift. Recurses through
directories
"""
# file_list: A list of FileDetails to push to swift
file_list = []
if os.path.isfile(file_path):
relative_path = os.path.basename(file_path)
file_list.append(FileDetail(file_path, relative_path))
elif os.path.isdir(file_path):
original_root = os.path.realpath(os.path.abspath(
os.path.expanduser(file_path)))
parent_dir = os.path.dirname(file_path)
if not file_path.endswith('/'):
filename = os.path.basename(file_path)
full_path = file_path
relative_name = os.path.relpath(full_path, parent_dir)
file_list.append(FileDetail(full_path, relative_name,
filename))
# TODO: this will copy the result of symlinked files, but
# it won't follow directory symlinks. If we add that, we
# should ensure that we don't loop.
for path, folders, files in os.walk(file_path):
                # Sort folders and files in-place so that we recurse in order.
files.sort(key=lambda x: x.lower())
folders.sort(key=lambda x: x.lower())
# relative_path: The path between the given directory
# and the one being currently walked.
relative_path = os.path.relpath(path, parent_dir)
for filename in folders:
full_path = os.path.join(path, filename)
if not self._path_in_tree(original_root, full_path):
continue
relative_name = os.path.relpath(full_path, parent_dir)
file_list.append(FileDetail(full_path, relative_name,
filename))
for filename in files:
full_path = os.path.join(path, filename)
if not self._path_in_tree(original_root, full_path):
continue
relative_name = os.path.relpath(full_path, parent_dir)
file_detail = FileDetail(full_path, relative_name)
file_list.append(file_detail)
self.file_list += file_list
class Indexer():
"""generates index.html files if requested."""
def __init__(self, create_parent_links=True,
create_topdir_parent_link=False,
append_footer='index_footer.html'):
self.create_parent_links = create_parent_links
self.create_topdir_parent_link = create_topdir_parent_link
self.append_footer = append_footer
self.index_filename = 'index.html'
def make_indexes(self, file_list):
folders = collections.OrderedDict()
for f in file_list:
if f.folder:
folders[f.relative_path] = []
folder = os.path.dirname(os.path.dirname(
f.relative_path + '/'))
if folder == '/':
folder = ''
else:
folder = os.path.dirname(f.relative_path)
folders[folder].append(f)
indexes = {}
parent_file_detail = FileDetail(None, '..', '..')
for folder, files in folders.items():
# Don't add the pseudo-top-directory
if files and files[0].full_path is None:
files = files[1:]
if self.create_topdir_parent_link:
files = [parent_file_detail] + files
elif self.create_parent_links:
files = [parent_file_detail] + files
# Do generate a link to the parent directory
full_path = self.make_index_file(files, 'Index of %s' % (folder,))
if full_path:
filename = os.path.basename(full_path)
relative_name = os.path.join(folder, filename)
indexes[folder] = FileDetail(full_path, relative_name)
# This appends the index file at the end of the group of files
# for each directory.
ret_file_list = FileList()
newlist = []
last_dirname = None
for f in reversed(list(file_list)):
if f.folder:
relative_path = f.relative_path + '/'
else:
relative_path = f.relative_path
dirname = os.path.dirname(relative_path)
if dirname == '/':
dirname = ''
if dirname != last_dirname:
index = indexes.pop(dirname, None)
if index:
newlist.append(index)
last_dirname = dirname
newlist.append(f)
newlist.reverse()
ret_file_list.file_list = newlist
return ret_file_list
def make_index_file(self, folder_links, title):
"""Writes an index into a file for pushing"""
for file_details in folder_links:
# Do not generate an index file if one exists already.
# This may be the case when uploading other machine generated
# content like python coverage info.
if self.index_filename == file_details.filename:
return
index_content = self.generate_log_index(folder_links, title)
tempdir = tempfile.mkdtemp()
fd = open(os.path.join(tempdir, self.index_filename), 'w')
fd.write(index_content)
return os.path.join(tempdir, self.index_filename)
def generate_log_index(self, folder_links, title):
"""Create an index of logfiles and links to them"""
output = '<html><head><title>%s</title></head><body>\n' % title
output += '<h1>%s</h1>\n' % title
output += '<table><tr><th></th><th>Name</th><th>Last Modified</th>'
output += '<th>Size</th></tr>'
file_details_to_append = None
for file_details in folder_links:
output += '<tr>'
output += (
'<td><img alt="[ ]" title="%(m)s" src="%(i)s"></img></td>' % ({
'm': file_details.mimetype,
'i': get_mime_icon(file_details.mimetype,
file_details.filename),
}))
filename = file_details.filename
if file_details.folder:
filename += '/'
output += '<td><a href="%s">%s</a></td>' % (filename,
filename)
output += '<td>%s</td>' % time.asctime(
file_details.last_modified)
if file_details.mimetype == 'folder':
size = str(file_details.size)
else:
size = sizeof_fmt(file_details.size, suffix='')
output += '<td style="text-align: right">%s</td>' % size
output += '</tr>\n'
if (self.append_footer and
self.append_footer in file_details.filename):
file_details_to_append = file_details
output += '</table>'
if file_details_to_append:
output += '<br /><hr />'
try:
with open(file_details_to_append.full_path, 'r') as f:
output += f.read()
except IOError:
logging.exception("Error opening file for appending")
output += '</body></html>\n'
return output
class DeflateFilter():
chunk_size = 16384
def __init__(self, infile):
self.infile = infile
self.encoder = zlib.compressobj()
self.done = False
def __iter__(self):
return self
def __next__(self):
if self.done:
raise StopIteration()
ret = b''
while True:
data = self.infile.read(self.chunk_size)
if data:
ret = self.encoder.compress(data)
if ret:
break
else:
self.done = True
ret = self.encoder.flush()
break
return ret
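# Hedged sketch (added, not part of the original module): DeflateFilter yields
# zlib-compressed chunks from a file object, which is what lets _post_file()
# stream a 'content-encoding: deflate' body without loading a whole log into
# memory. The helper below is illustrative only and just checks the round-trip.
def _deflate_roundtrip_sketch(path):
    with open(path, 'rb') as raw:
        compressed = b''.join(DeflateFilter(raw))
    return zlib.decompress(compressed)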
class Uploader():
def __init__(self, cloud, container, prefix=None, delete_after=None,
public=True):
if isinstance(cloud, dict):
config = openstack.config.loader.OpenStackConfig().get_one(**cloud)
self.cloud = openstack.connection.Connection(config=config)
else:
self.cloud = openstack.connect(cloud=cloud)
self.container = container
self.prefix = prefix or ''
self.delete_after = delete_after
sess = self.cloud.config.get_session()
adapter = requests.adapters.HTTPAdapter(pool_maxsize=100)
sess.mount('https://', adapter)
if not self.cloud.get_container(self.container):
self.cloud.create_container(name=self.container, public=public)
self.cloud.update_container(
name=self.container,
headers={'X-Container-Meta-Web-Index': 'index.html'})
# 'X-Container-Meta-Web-Listings': 'true'
# The ceph radosgw swift implementation requires an
# index.html at the root in order for any other indexes to
# work.
self.cloud.create_object(self.container,
name='index.html',
data='',
content_type='text/html')
self.url = os.path.join(self.cloud.object_store.get_endpoint(),
self.container, self.prefix)
def upload(self, file_list):
"""Spin up thread pool to upload to swift"""
num_threads = min(len(file_list), MAX_UPLOAD_THREADS)
threads = []
queue = queuelib.Queue()
# add items to queue
for f in file_list:
queue.put(f)
for x in range(num_threads):
t = threading.Thread(target=self.post_thread, args=(queue,))
threads.append(t)
t.start()
for t in threads:
t.join()
def post_thread(self, queue):
while True:
try:
file_detail = queue.get_nowait()
logging.debug("%s: processing job %s",
threading.current_thread(),
file_detail)
self._post_file(file_detail)
except requests.exceptions.RequestException:
# Do our best to attempt to upload all the files
logging.exception("Error posting file after multiple attempts")
continue
except IOError:
# Do our best to attempt to upload all the files
logging.exception("Error opening file")
continue
except queuelib.Empty:
# No more work to do
return
@staticmethod
def _is_text_type(mimetype):
# We want to compress all text types.
if mimetype.startswith('text/'):
return True
        # Further compress types that typically contain text but are not
        # a text subtype.
compress_types = [
'application/json',
'image/svg+xml',
]
if mimetype in compress_types:
return True
return False
def _post_file(self, file_detail):
relative_path = os.path.join(self.prefix, file_detail.relative_path)
headers = {}
if self.delete_after:
headers['x-delete-after'] = str(self.delete_after)
headers['content-type'] = file_detail.mimetype
for attempt in range(1, POST_ATTEMPTS + 1):
try:
if not file_detail.folder:
if (file_detail.encoding is None and
self._is_text_type(file_detail.mimetype)):
headers['content-encoding'] = 'deflate'
data = DeflateFilter(open(file_detail.full_path, 'rb'))
else:
if file_detail.encoding:
headers['content-encoding'] = file_detail.encoding
data = open(file_detail.full_path, 'rb')
else:
data = ''
relative_path = relative_path.rstrip('/')
if relative_path == '':
relative_path = '/'
self.cloud.create_object(self.container,
name=relative_path,
data=data,
**headers)
break
except requests.exceptions.RequestException:
logging.exception(
"File posting error on attempt %d" % attempt)
if attempt >= POST_ATTEMPTS:
raise
def run(cloud, container, files,
indexes=True, parent_links=True, topdir_parent_link=False,
partition=False, footer='index_footer.html', delete_after=15552000,
prefix=None, public=True, dry_run=False):
if prefix:
prefix = prefix.lstrip('/')
if partition and prefix:
parts = prefix.split('/')
if len(parts) > 1:
container += '_' + parts[0]
prefix = '/'.join(parts[1:])
# Create the objects to make sure the arguments are sound.
file_list = FileList()
indexer = Indexer(create_parent_links=parent_links,
create_topdir_parent_link=topdir_parent_link,
append_footer=footer)
# Scan the files.
for file_path in files:
file_list.add(file_path)
# (Possibly) make indexes.
if indexes:
file_list = indexer.make_indexes(file_list)
logging.debug("List of files prepared to upload:")
for x in file_list:
logging.debug(x)
    # Do not connect to swift or do any uploading in a dry run
if dry_run:
# No URL is known, so return nothing
return
# Upload.
uploader = Uploader(cloud, container, prefix, delete_after,
public)
uploader.upload(file_list)
return uploader.url
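# Illustration (added, not part of the original module) of the partition
# handling in run() above: with partition=True the first path component of the
# prefix is folded into the container name, e.g. container='logs',
# prefix='periodic/job/42' becomes container='logs_periodic', prefix='job/42'.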
def ansible_main():
module = AnsibleModule(
argument_spec=dict(
cloud=dict(required=True, type='raw'),
container=dict(required=True, type='str'),
files=dict(required=True, type='list'),
partition=dict(type='bool', default=False),
indexes=dict(type='bool', default=True),
parent_links=dict(type='bool', default=True),
topdir_parent_link=dict(type='bool', default=False),
public=dict(type='bool', default=True),
footer=dict(type='str'),
delete_after=dict(type='int'),
prefix=dict(type='str'),
)
)
p = module.params
url = run(p.get('cloud'), p.get('container'), p.get('files'),
indexes=p.get('indexes'),
parent_links=p.get('parent_links'),
topdir_parent_link=p.get('topdir_parent_link'),
partition=p.get('partition'),
footer=p.get('footer'),
delete_after=p.get('delete_after', 15552000),
prefix=p.get('prefix'),
public=p.get('public'))
module.exit_json(changed=True,
url=url)
def cli_main():
parser = argparse.ArgumentParser(
description="Upload files to swift"
)
parser.add_argument('--verbose', action='store_true',
help='show debug information')
parser.add_argument('--no-indexes', action='store_true',
help='do not generate any indexes at all')
parser.add_argument('--no-parent-links', action='store_true',
help='do not include links back to a parent dir')
parser.add_argument('--create-topdir-parent-link', action='store_true',
help='include a link in the root directory of the '
'files to the parent directory which may be the '
'index of all results')
parser.add_argument('--no-public', action='store_true',
help='do not create the container as public')
parser.add_argument('--partition', action='store_true',
help='partition the prefix into multiple containers')
parser.add_argument('--append-footer', default='index_footer.html',
help='when generating an index, if the given file is '
'present in a directory, append it to the index '
'(set to "none" to disable)')
parser.add_argument('--delete-after', default=15552000,
help='Number of seconds to delete object after '
'upload. Default is 6 months (15552000 seconds) '
'and if set to 0 X-Delete-After will not be set',
type=int)
parser.add_argument('--prefix',
help='Prepend this path to the object names when '
'uploading')
parser.add_argument('--dry-run', action='store_true',
help='do not attempt to create containers or upload, '
'useful with --verbose for debugging')
parser.add_argument('cloud',
help='Name of the cloud to use when uploading')
parser.add_argument('container',
help='Name of the container to use when uploading')
parser.add_argument('files', nargs='+',
help='the file(s) to upload with recursive glob '
'matching when supplied as a string')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
# Set requests log level accordingly
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.captureWarnings(True)
append_footer = args.append_footer
if append_footer.lower() == 'none':
append_footer = None
url = run(args.cloud, args.container, args.files,
indexes=not args.no_indexes,
parent_links=not args.no_parent_links,
topdir_parent_link=args.create_topdir_parent_link,
partition=args.partition,
footer=append_footer,
delete_after=args.delete_after,
prefix=args.prefix,
public=not args.no_public,
dry_run=args.dry_run)
print(url)
if __name__ == '__main__':
# Avoid unactionable warnings
requestsexceptions.squelch_warnings(
requestsexceptions.InsecureRequestWarning)
if sys.stdin.isatty():
cli_main()
else:
ansible_main()
|
testing.py
|
import cv2
import numpy as np
from threading import Event, Thread
import time
import tensorflow.keras
from PIL import Image, ImageOps
import json
from tensorflow.keras.models import load_model
p = 0
prediction = ['1 Finger', '2 Fingers',
'3 Fingers', '4 Fingers', '5 Fingers']
# Camera
camera = cv2.VideoCapture(0)
camera.set(10, 200)
# parameters
bgCapture = 0
bgSubThreshold = 50
learningRate = 0
blurValue = 41
threshold = 60
cap_region_x_begin = 0.5 # start point/total width
cap_region_y_end = 0.8 # start point/total width
imgCount = 0
# Disable scientific notation for clarity
np.set_printoptions(suppress=True)
# Load the model
# json_file = open('Hand_gesture/Saved_models/final1', 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# loaded_model = model_from_json(loaded_model_json)
# loaded_model.load_weights('Hand_gesture/final_model1.h5')
loaded_model = load_model('model.h5')
class RepeatedTimer:
def __init__(self, interval):
self.interval = interval
self.start = time.time()
self.event = Event()
self.thread = Thread(target=self._target)
self.thread.start()
def _target(self):
while not self.event.wait(self._time):
print("Current prediction:-" + str(p))
@property
def _time(self):
return self.interval - ((time.time() - self.start) % self.interval)
def stop(self):
self.event.set()
self.thread.join()
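# Note (added): RepeatedTimer re-anchors every wait to the original start time
# (interval - elapsed % interval), so the periodic print below stays aligned to
# 2-second ticks instead of drifting when an iteration runs long.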
# start timer
timer = RepeatedTimer(2)
def pred():
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# Replace this with the path to your image
image = Image.open('img0.png')
image = image.convert('RGB')
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# Load the image into the array
data[0] = image_array
# run the inference
prediction = loaded_model.predict(data)
return np.argmax(prediction)
def remove_background(frame):
fgmask = bgModel.apply(frame, learningRate=learningRate)
kernel = np.ones((3, 3), np.uint8)
#fgmask = cv2.dilate(fgmask, kernel, iterations=1)
fgmask = cv2.erode(fgmask, kernel, iterations=1)
res = cv2.bitwise_and(frame, frame, mask=fgmask)
return res
while camera.isOpened():
ret, frame = camera.read()
frame = cv2.bilateralFilter(frame, 5, 50, 100) # smoothing filter
frame = cv2.flip(frame, 1) # flip the frame horizontally
cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
(frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)
cv2.imshow('original', frame)
if bgCapture == 1:
img = remove_background(frame)
img = img[0:int(cap_region_y_end * frame.shape[0]),
int(cap_region_x_begin * frame.shape[1]): frame.shape[1]] # clip the ROI
# convert the image into binary image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
ret, thresh = cv2.threshold(
blur, threshold, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imwrite(f'img0.png', thresh)
p = pred()
""" print('predecting: - ', p) """
cv2.putText(thresh, prediction[p], (20, 20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.imshow('threshold', thresh)
k = cv2.waitKey(10)
if k == 27: # press ESC to exit all windows at any time
break
elif k == ord('b'): # press 'b' to capture the background
bgModel = cv2.createBackgroundSubtractorMOG2(
0, bgSubThreshold, detectShadows=False)
bgCapture = 1
timer.stop()
|
interface_rpc.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Askalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import AskalcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
from threading import Thread
import subprocess
def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args):
try:
fcn(*args)
raise AssertionError(f"Expected RPC error {expected_rpc_code}, got none")
except JSONRPCException as exc:
assert_equal(exc.error["code"], expected_rpc_code)
assert_equal(exc.http_status, expected_http_status)
def test_work_queue_getblock(node, got_exceeded_error):
while not got_exceeded_error:
try:
node.cli('getrpcinfo').send_cli()
except subprocess.CalledProcessError as e:
assert_equal(e.output, 'error: Server response: Work queue depth exceeded\n')
got_exceeded_error.append(True)
class RPCInterfaceTest(AskalcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def test_getrpcinfo(self):
self.log.info("Testing getrpcinfo...")
info = self.nodes[0].getrpcinfo()
assert_equal(len(info['active_commands']), 1)
command = info['active_commands'][0]
assert_equal(command['method'], 'getrpcinfo')
assert_greater_than_or_equal(command['duration'], 0)
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
# A basic request that will work fine.
{"method": "getblockcount", "id": 1},
# Request that will fail. The whole batch request should still
# work fine.
{"method": "invalidmethod", "id": 2},
# Another call that should succeed.
{"method": "getblockhash", "id": 3, "params": [0]},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def test_http_status_codes(self):
self.log.info("Testing HTTP status codes for JSON-RPC requests...")
expect_http_status(404, -32601, self.nodes[0].invalidmethod)
expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
def test_work_queue_exceeded(self):
self.log.info("Testing work queue exceeded...")
self.restart_node(0, ['-rpcworkqueue=1', '-rpcthreads=1'])
got_exceeded_error = []
threads = []
for _ in range(3):
t = Thread(target=test_work_queue_getblock, args=(self.nodes[0], got_exceeded_error))
t.start()
threads.append(t)
for t in threads:
t.join()
def run_test(self):
self.test_getrpcinfo()
self.test_batch_request()
self.test_http_status_codes()
self.test_work_queue_exceeded()
if __name__ == '__main__':
RPCInterfaceTest().main()
|
ssh_interactive_commnds.py
|
import sys
import os
import select
import socket
import paramiko
import threading
import multiprocessing
import time
import commands
import subprocess
from fabric.api import *
from fabric.state import connections as fab_connections
from tcutils.commands import ssh, execute_cmd, execute_cmd_out
from tcutils.util import *
from tcutils.fabfile import *
class SshConnect(threading.Thread):
def __init__(self, remoteCmdExecuterObj):
threading.Thread.__init__(self)
self.remoteCmdExecuterObj = remoteCmdExecuterObj
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.load_host_keys(
os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
def run(self):
try:
self.ssh.connect(self.remoteCmdExecuterObj.host,
username=self.remoteCmdExecuterObj.username,
password=self.remoteCmdExecuterObj.password)
except:
print("(pid %d) ssh to %s failed.." %
(os.getpid(), self.remoteCmdExecuterObj.host))
return
self.remoteCmdExecuterObj._ssh = self.ssh
class remoteCmdExecuter:
def __init__(self):
pass
def execConnect(self, host, username, password):
retry = 0
self.host = host
self.username = username
self.password = password
self._ssh = None
return
while self._ssh == None and retry < 100:
retry += 1
''' This command hangs. Hence launch a thread in background and timeout '''
t = SshConnect(self)
t.start()
t.join(10)
if self._ssh != None:
break
time.sleep(5)
if self._ssh == None and t.isAlive():
print(
"************ %d. Kill frozen ssh connection to %s, retry" %
(retry, host))
try:
                    t._Thread__stop()   # Thread's name-mangled private __stop()
except:
print(
"%d. ssh to %s Thread could not be terminated!, ignore." %
(retry, host))
if self._ssh == None:
print("********* FATAL ********** SSH to %s failed!" % (host))
def execCmd(self, cmd, username, password, node, local_ip):
fab_connections.clear()
with hide('everything'):
with settings(
host_string='%s@%s' % (username, local_ip),
password=password,
warn_only=True, abort_on_prompts=False, debug=True):
if 'show' in cmd:
result = run_netconf_on_node(
host_string='%s@%s' % (
username, node),
password=password,
cmds=cmd, op_format='json')
#ssh_conf_file_alternate = "-o UserKnownHostsFile=/dev/null -o strictHostKeyChecking=no"
else:
                    result = run_fab_cmd_on_node(
host_string='%s@%s' % (username, node),
password=password, cmd=cmd, as_sudo=True)
return result
def testRemoteCmdExecuter():
aD = remoteCmdExecuter()
aD.execConnect('10.84.7.250', 'root', 'Embe1mpls')
# aD.execConnect( '10.84.7.42', 'root', 'c0ntrail123')
# print aD.execCmd ('ping 39.0.0.1 -I 10.84.7.42 -c 1 -W 1 | grep -i " 0%
# packet loss"')
    print(aD.execCmd('cli show bgp summary | display xml'))
# print aD.execCmd ('ifsmon -Id | grep ROUTE')
# print aD.execCmd ('cli -c "show bgp summary"')
if __name__ == "__main__":
processList = []
for i in range(1, 2):
process = multiprocessing.Process(target=testRemoteCmdExecuter)
process.start()
processList.append(process)
for process in processList:
process.join()
|
main.py
|
import json
import time
import threading
import base64
import cv2
import numpy as np
import onnxruntime
from flask import Flask, request, Response
import requests
from azure.iot.device import IoTHubModuleClient
from object_detection import ObjectDetection
from onnxruntime_predict import ONNXRuntimeObjectDetection
from utility import get_file_zip, normalize_rtsp
MODEL_DIR = 'model'
UPLOAD_INTERVAL = 1 # sec
DETECTION_TYPE_NOTHING = 'nothing'
DETECTION_TYPE_SUCCESS = 'success'
DETECTION_TYPE_UNIDENTIFIED = 'unidentified'
DETECTION_BUFFER_SIZE = 10000
IMG_WIDTH=960
IMG_HEIGHT=540
def is_edge():
try:
IoTHubModuleClient.create_from_edge_environment()
return True
except:
return False
try:
iot = IoTHubModuleClient.create_from_edge_environment()
except:
iot = None
def web_module_url():
if is_edge(): return '172.18.0.1:8080'
else: return 'localhost:8000'
def is_inside_aoi(x1, y1, x2, y2, aoi_info):
for aoi_area in aoi_info:
#print(x1, y1, x2, y2, aoi_area)
if ( (aoi_area['x1'] <= x1 <= aoi_area['x2']) or (aoi_area['x1'] <= x2 <= aoi_area['x2']) ) and \
( (aoi_area['y1'] <= y1 <= aoi_area['y2']) or (aoi_area['y1'] <= y2 <= aoi_area['y2']) ):
#print('in')
return True
return False
def parse_bbox(prediction, width, height):
x1 = int(prediction['boundingBox']['left'] * width)
y1 = int(prediction['boundingBox']['top'] * height)
x2 = x1 + int(prediction['boundingBox']['width'] * width)
y2 = y1 + int(prediction['boundingBox']['height'] * height)
x1 = min(max(x1, 0), width-1)
x2 = min(max(x2, 0), width-1)
y1 = min(max(y1, 0), height-1)
y2 = min(max(y2, 0), height-1)
return (x1, y1), (x2, y2)
def draw_confidence_level(img, prediction):
height, width = img.shape[0], img.shape[1]
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.7
thickness = 2
prob_str = str(int(prediction['probability']*1000)/10)
prob_str = ' (' + prob_str + '%)'
(x1, y1), (x2, y2) = parse_bbox(prediction, width, height)
img = cv2.putText(img, prediction['tagName']+prob_str, (x1+10, y1+20), font, font_scale, (20, 20, 255), thickness)
return img
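# Hedged sketch (added, not part of the original module): the prediction dicts
# consumed by parse_bbox()/draw_confidence_level() carry bounding boxes in
# normalised [0, 1] coordinates, so a box is scaled by the frame size and then
# clamped to it. Illustrative only, never called.
def _parse_bbox_example():
    prediction = {
        'tagName': 'part',
        'probability': 0.92,
        'boundingBox': {'left': 0.5, 'top': 0.25, 'width': 0.2, 'height': 0.5},
    }
    # with a 960x540 frame this returns (480, 135), (672, 405)
    return parse_bbox(prediction, IMG_WIDTH, IMG_HEIGHT)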
class ONNXRuntimeModelDeploy(ObjectDetection):
"""Object Detection class for ONNX Runtime
"""
def __init__(self, model_dir, cam_type="video_file", cam_source="./sample_video/video.mp4"):
#def __init__(self, model_dir, cam_type="video_file", cam_source="./mov_bbb.mp4"):
#def __init__(self, model_dir, cam_type="video_file", cam_source="./sample_video/video_1min.mp4"):
#def __init__(self, model_dir, cam_type="rtsp", cam_source="rtsp://52.229.36.89:554/media/catvideo.mkv"):
# Default system params
self.render = False
self.lock = threading.Lock()
self.cam_type = cam_type
self.cam_source = cam_source
self.cam = cv2.VideoCapture(normalize_rtsp(cam_source))
self.model = self.load_model(model_dir, is_default_model=True)
self.model_uri = None
self.last_img = None
self.last_prediction = []
self.confidence_min = 30 * 0.01
self.confidence_max = 30 * 0.01
self.max_images = 10
self.last_upload_time = 0
self.is_upload_image = False
self.current_uploaded_images = {}
self.detection_success_num = 0
self.detection_unidentified_num = 0
self.detection_total = 0
self.detections = []
self.threshold = 0.3
self.has_aoi = False
self.aoi_info = None
# Part that we want to detect
self.parts = []
self.is_gpu = (onnxruntime.get_device() == 'GPU')
self.average_inference_time = 0
# IoT Hub
self.iothub_is_send = False
self.iothub_threshold = 0.5
self.iothub_fpm = 0
self.iothub_last_send_time = time.time()
self.iothub_interval = 99999999
#self.iothub_is_send = True
#self.iothub_threshold = 0.8
#self.iothub_fpm = 1
#self.iothub_last_send_time = time.time()
#self.iothub_interval = 5
def restart_cam(self):
print('[INFO] Restarting Cam')
cam = cv2.VideoCapture(normalize_rtsp(self.cam_source))
# Protected by Mutex
self.lock.acquire()
self.cam.release()
self.cam = cam
self.lock.release()
def update_parts(self, parts):
print('[INFO] Updating Parts ...', parts)
self.parts = parts
def update_cam(self, cam_type, cam_source, has_aoi, aoi_info):
print('[INFO] Updating Cam ...')
#print(' cam_type', cam_type)
#print(' cam_source', cam_source)
if cam_source == '0': cam_source = 0
elif cam_source == '1': cam_source = 1
elif cam_source == '2': cam_source = 2
elif cam_source == '3': cam_source = 3
if self.cam_type == cam_type and self.cam_source == cam_source: return
self.cam_source = cam_source
self.has_aoi = has_aoi
self.aoi_info = aoi_info
cam = cv2.VideoCapture(normalize_rtsp(cam_source))
# Protected by Mutex
self.lock.acquire()
self.cam.release()
self.cam = cam
self.lock.release()
def load_model(self, model_dir, is_default_model):
if is_default_model:
print('[INFO] Loading Default Model ...')
model = None
with open(model_dir + str('/cvexport.manifest')) as f:
data = json.load(f)
# FIXME to check whether we need to close the previous session
if data['DomainType'] == 'ObjectDetection':
model = ObjectDetection(data, model_dir, None)
return model
else:
print('[INFO] Loading Default Model ...')
with open('model/labels.txt', 'r') as f:
labels = [l.strip() for l in f.readlines()]
model = ONNXRuntimeObjectDetection('model/model.onnx', labels)
return model
return None
def update_retrain_parameters(self, confidence_min, confidence_max, max_images):
self.confidence_min = confidence_min * 0.01
self.confidence_max = confidence_max * 0.01
self.max_images = max_images
self.threshold = self.confidence_max
def update_model(self, model_dir):
is_default_model = ('default_model' in model_dir)
model = self.load_model(model_dir, is_default_model)
# Protected by Mutex
self.lock.acquire()
self.model = model
self.current_uploaded_images = {}
self.is_upload_image = True
self.detection_success_num = 0
self.detection_unidentified_num = 0
self.detection_total = 0  # detections of type 'nothing' are not counted
self.detections = []
self.lock.release()
def update_iothub_parameters(self, is_send, threshold, fpm):
self.iothub_is_send = is_send
self.iothub_threshold = threshold
self.iothub_fpm = fpm
self.iothub_last_send_time = time.time()
if fpm == 0:
self.iothub_is_send = 0
self.iothub_interval = 99999999
else:
self.iothub_interval = 60 / fpm # seconds
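# Illustration (hypothetical values): fpm=6 yields iothub_interval = 10 seconds, i.e.
# at most six metric messages per minute; fpm=0 disables sending entirely, as above.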
def predict(self, image):
self.lock.acquire()
prediction, inf_time = self.model.predict_image(image)
self.lock.release()
inf_time_ms = inf_time * 1000
self.average_inference_time = 1/16*inf_time_ms + 15/16*self.average_inference_time
return prediction
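# Note: average_inference_time is an exponential moving average with weight 1/16 on the
# newest sample. For example (hypothetical), with a running average of 40 ms and a new
# inference of 56 ms, the average becomes 1/16*56 + 15/16*40 = 41 ms.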
def start_session(self):
def run(self):
send_counter = 0
while True:
self.lock.acquire()
b, img = self.cam.read()
if b:
width = IMG_WIDTH
ratio = IMG_WIDTH / img.shape[1]
height = int(img.shape[0] * ratio + 0.000001)
if height >= IMG_HEIGHT:
height = IMG_HEIGHT
ratio = IMG_HEIGHT / img.shape[0]
width = int(img.shape[1] * ratio + 0.000001)
img = cv2.resize(img, (width, height))
self.lock.release()
# if b is false, restart the video if the type is video
if b:
self.last_img = img
self.last_prediction = self.predict(img)
#print(self.last_prediction)
height, width = img.shape[0], img.shape[1]
detection = DETECTION_TYPE_NOTHING
if True:
send_counter += 1
if self.iothub_is_send:
if self.iothub_last_send_time + self.iothub_interval < time.time():
predictions_to_send = []
for prediction in self.last_prediction:
_tag = prediction['tagName']
_p = prediction['probability']
if _tag not in self.parts: continue
if _p < self.iothub_threshold: continue
x1 = int(prediction['boundingBox']['left'] * width)
y1 = int(prediction['boundingBox']['top'] * height)
x2 = x1 + int(prediction['boundingBox']['width'] * width)
y2 = y1 + int(prediction['boundingBox']['height'] * height)
x1 = min(max(x1, 0), width-1)
x2 = min(max(x2, 0), width-1)
y1 = min(max(y1, 0), height-1)
y2 = min(max(y2, 0), height-1)
if self.has_aoi:
if not is_inside_aoi(x1, y1, x2, y2, self.aoi_info): continue
predictions_to_send.append(prediction)
if len(predictions_to_send) > 0:
if iot:
try:
iot.send_message_to_output(json.dumps(predictions_to_send), 'metrics')
except:
print('[ERROR] Failed to send message to iothub', flush=True)
print('[INFO] sending metrics to iothub')
else:
#print('[METRICS]', json.dumps(predictions_to_send))
pass
self.iothub_last_send_time = time.time()
for prediction in self.last_prediction:
tag = prediction['tagName']
if tag not in self.parts:
continue
(x1, y1) , (x2, y2) = parse_bbox(prediction, width, height)
if self.has_aoi:
if not is_inside_aoi(x1, y1, x2, y2, self.aoi_info): continue
if detection != DETECTION_TYPE_SUCCESS:
if prediction['probability'] >= self.threshold:
detection = DETECTION_TYPE_SUCCESS
else:
detection = DETECTION_TYPE_UNIDENTIFIED
if self.last_upload_time + UPLOAD_INTERVAL < time.time():
if self.confidence_min <= prediction['probability'] <= self.confidence_max:
if self.is_upload_image:
#if tag in onnx.current_uploaded_images and self.current_uploaded_images[tag] >= onnx.max_images:
#if tag in onnx.current_uploaded_images:
# No limit for the max_images in inference module now, the logic is moved to webmodule
# pass
#else:
if True:
labels = json.dumps([{'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2}])
print('[INFO] Sending Image to relabeling', tag, onnx.current_uploaded_images.get(tag, 0), labels)
#self.current_uploaded_images[tag] = self.current_uploaded_images.get(tag, 0) + 1
self.last_upload_time = time.time()
jpg = cv2.imencode('.jpg', img)[1].tobytes()
try:
requests.post('http://'+web_module_url()+'/api/relabel', data={
'confidence': prediction['probability'],
'labels': labels,
'part_name': tag,
'is_relabel': True,
'img': base64.b64encode(jpg)
})
except:
print('[ERROR] Failed to update image for relabeling')
self.lock.acquire()
if detection == DETECTION_TYPE_NOTHING:
pass
else:
if self.detection_total == DETECTION_BUFFER_SIZE:
oldest_detection = self.detections.pop(0)
if oldest_detection == DETECTION_TYPE_UNIDENTIFIED:
self.detection_unidentified_num -= 1
elif oldest_detection == DETECTION_TYPE_SUCCESS:
self.detection_success_num -= 1
self.detections.append(detection)
if detection == DETECTION_TYPE_UNIDENTIFIED:
self.detection_unidentified_num += 1
elif detection == DETECTION_TYPE_SUCCESS:
self.detection_success_num += 1
else:
self.detections.append(detection)
if detection == DETECTION_TYPE_UNIDENTIFIED:
self.detection_unidentified_num += 1
elif detection == DETECTION_TYPE_SUCCESS:
self.detection_success_num += 1
self.detection_total += 1
self.lock.release()
#print(detection)
else:
if self.cam_type == 'video_file':
self.restart_cam()
#print(self.last_prediction)
if self.cam_type == 'video_file':
time.sleep(0.01)
self.session = threading.Thread(target=run, args=(self,))
self.session.start()
def local_test(self):
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
cv2.imshow('img', img)
res = self.predict(img)
print(res)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
model_dir = './default_model'
#model_dir = './default_model_6parts'
onnx = ONNXRuntimeModelDeploy(model_dir)
onnx.start_session()
app = Flask(__name__)
@app.route('/prediction', methods=['GET'])
def predict():
#print(onnx.last_prediction)
#onnx.last_prediction
return json.dumps(onnx.last_prediction)
@app.route('/metrics', methods=['GET'])
def metrics():
inference_num = onnx.detection_success_num
unidentified_num = onnx.detection_unidentified_num
total = onnx.detection_total
is_gpu = onnx.is_gpu
average_inference_time = onnx.average_inference_time
if total == 0:
success_rate = 0
else:
success_rate = inference_num * 100 / total
return json.dumps({
'success_rate': success_rate,
'inference_num': inference_num,
'unidentified_num': unidentified_num,
'is_gpu': is_gpu,
'average_inference_time': average_inference_time,
})
@app.route('/update_retrain_parameters')
def update_retrain_parameters():
confidence_min = request.args.get('confidence_min')
if not confidence_min: return 'missing confidence_min'
confidence_max = request.args.get('confidence_max')
if not confidence_max: return 'missing confidence_max'
max_images = request.args.get('max_images')
if not max_images: return 'missing max_images'
onnx.confidence_min = int(confidence_min) * 0.01
onnx.confidence_max = int(confidence_max) * 0.01
onnx.max_images = int(max_images)
print('[INFO] updating retrain parameters to')
print('  confidence_min:', confidence_min)
print('  confidence_max:', confidence_max)
print(' max_images :', max_images)
return 'ok'
@app.route('/update_model')
def update_model():
model_uri = request.args.get('model_uri')
model_dir = request.args.get('model_dir')
if not model_uri and not model_dir: return ('missing model_uri or model_dir')
print('[INFO] Update Model ...')
if model_uri:
print('[INFO] Got Model URI', model_uri)
if model_uri == onnx.model_uri:
print('[INFO] Model Uri unchanged')
else:
get_file_zip(model_uri, MODEL_DIR)
onnx.model_uri = model_uri
onnx.update_model('model')
print('[INFO] Update Finished ...')
return 'ok'
elif model_dir:
print('[INFO] Got Model DIR', model_dir)
onnx.update_model(model_dir)
print('[INFO] Update Finished ...')
return 'ok'
@app.route('/update_cam')
def update_cam():
cam_type = request.args.get('cam_type')
cam_source = request.args.get('cam_source')
if not cam_type: return 'missing cam_type'
if not cam_source: return 'missing cam_source'
print('updating cam ...')
print(' cam_type', cam_type)
print(' cam_source', cam_source)
aoi = request.args.get('aoi')
try:
aoi = json.loads(aoi)
has_aoi = aoi['useAOI']
aoi_info = aoi['AOIs']
except:
has_aoi = False
aoi_info = None
print(' has_aoi', has_aoi)
print(' aoi_info', aoi_info)
onnx.update_cam(cam_type, cam_source, has_aoi, aoi_info)
return 'ok'
@app.route('/update_parts')
def update_parts():
try:
print('----Update parts----')
parts = request.args.getlist('parts')
print('[INFO] Updating parts', parts)
print('[INFO] Updated parts', parts)
except:
print('[ERROR] Unknown format', parts)
#return 'unknown format'
onnx.update_parts(parts)
return 'ok'
#@app.route('/update_threshold')
#def update_threshold():
# print('[WARNING] is depreciated')
# return 'ok'
@app.route('/update_iothub_parameters')
def update_iothub_parameters():
is_send = request.args.get('is_send')
threshold = request.args.get('threshold')
fpm = request.args.get('fpm')
if not is_send: return 'missing is_send'
if not threshold: return 'missing threshold'
if not fpm: return 'missing fpm'
is_send = (is_send == 'True')
threshold = int(threshold) * 0.01
fpm = int(fpm)
print('updating iothub parameters ...')
print(' is_send', is_send)
print(' threshold', threshold)
print(' fpm', fpm)
onnx.update_iothub_parameters(is_send, threshold, fpm)
return 'ok'
@app.route('/update_prob_threshold')
def update_prob_threshold():
prob_threshold = request.args.get('prob_threshold')
if not prob_threshold: return 'missing prob_threshold'
onnx.threshold = int(prob_threshold) * 0.01
print('[INFO] updating prob_threshold to')
print(' prob_threshold:', prob_threshold)
onnx.lock.acquire()
onnx.detection_success_num = 0
onnx.detection_unidentified_num = 0
onnx.detection_total = 0
onnx.detections = []
onnx.lock.release()
return 'ok'
@app.route('/video_feed')
def video_feed():
inference = bool(request.args.get('inference'))
print(inference)
def _gen():
while True:
img = onnx.last_img.copy()
if inference:
height, width = img.shape[0], img.shape[1]
predictions = onnx.last_prediction
for prediction in predictions:
tag = prediction['tagName']
if tag not in onnx.parts: continue
if onnx.has_aoi:
for aoi_area in onnx.aoi_info:
img = cv2.rectangle(img, (int(aoi_area['x1']), int(aoi_area['y1'])), (int(aoi_area['x2']), int(aoi_area['y2'])), (0, 255, 255), 2)
if prediction['probability'] > onnx.threshold:
(x1, y1), (x2, y2) = parse_bbox(prediction, width, height)
if onnx.has_aoi:
if not is_inside_aoi(x1, y1, x2, y2, onnx.aoi_info): continue
img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
img = draw_confidence_level(img, prediction)
time.sleep(0.02)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + cv2.imencode('.jpg', img)[1].tobytes() + b'\r\n')
return Response(_gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def main():
app.run(host='0.0.0.0', debug=False)
if __name__ == '__main__':
main()
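# Example (assumed local deployment): Flask serves on port 5000 by default, so the
# endpoints above can be exercised with plain HTTP once the module is running, e.g.
#   curl 'http://localhost:5000/prediction'
#   curl 'http://localhost:5000/metrics'
#   curl 'http://localhost:5000/update_prob_threshold?prob_threshold=60'
# Thresholds are passed as whole percentages and converted to 0-1 floats internally.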
|
__init__.py
|
#!/usr/bin/python
import base64
from binascii import hexlify
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from distutils.spawn import find_executable
from kvirt import common
from kvirt.common import error, pprint
from kvirt.defaults import UBUNTUS, METADATA_FIELDS
from math import ceil
from pyVmomi import vim, vmodl
from pyVim import connect
import json
import os
import re
import requests
import random
from ssl import _create_unverified_context, get_server_certificate
import tarfile
from tempfile import TemporaryDirectory
from threading import Thread
import time
import pyVmomi
import webbrowser
from zipfile import ZipFile
def waitForMe(t):
while t.info.state not in [vim.TaskInfo.State.success, vim.TaskInfo.State.error]:
time.sleep(1)
if t.info.state == vim.TaskInfo.State.error:
error(t.info.description)
error(t.info.error)
os._exit(1)
def collectproperties(si, view, objtype, pathset=None, includemors=False):
collector = si.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
objspec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
objspec.obj = view
objspec.skip = True
# Create a traversal specification to identify the path for collection
traversalspec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversalspec.name = 'traverseEntities'
traversalspec.path = 'view'
traversalspec.skip = False
traversalspec.type = view.__class__
objspec.selectSet = [traversalspec]
# Identify the properties to be retrieved
propertyspec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
propertyspec.type = objtype
if not pathset:
propertyspec.all = True
propertyspec.pathSet = pathset
# Add the object and property specification to the
# property filter specification
filterspec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filterspec.objectSet = [objspec]
filterspec.propSet = [propertyspec]
# Retrieve properties
props = collector.RetrieveContents([filterspec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if includemors:
properties['obj'] = obj.obj
data.append(properties)
return data
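# Usage sketch (assumes an established ServiceInstance 'si'):
#   view = si.content.viewManager.CreateContainerView(si.content.rootFolder, [vim.VirtualMachine], True)
#   vms = collectproperties(si, view=view, objtype=vim.VirtualMachine, pathset=['name'], includemors=True)
# Each entry is a dict such as {'name': 'myvm', 'obj': <vim.VirtualMachine object>}.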
def find(si, folder, vimtype, name):
o = si.content.viewManager.CreateContainerView(folder, [vimtype], True)
view = o.view
o.Destroy()
element = None
for e in view:
if e.name == name:
element = e
break
return element
def findvm(si, folder, name):
view = si.content.viewManager.CreateContainerView(folder, [vim.VirtualMachine], True)
vmlist = collectproperties(si, view=view, objtype=vim.VirtualMachine, pathset=['name'], includemors=True)
vm = list(filter(lambda v: v['name'] == name, vmlist))
if len(vm) >= 1:
return vm[-1]['obj']
else:
return None
def convert(octets, GB=True):
# return str(float(octets) / 1024 / 1024 / 1024) + "GB"
result = str(ceil(float(octets) / 1024 / 1024 / 1024))
if GB:
result += "GB"
return result
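# Worked example: convert(1610612736) rounds 1.5 GiB up and returns "2GB";
# with GB=False the same call returns the bare string "2".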
def dssize(ds):
di = ds.summary
return convert(di.capacity), convert(di.freeSpace)
def makecuspec(name, nets=[], gateway=None, dns=None, domain=None):
customspec = vim.vm.customization.Specification()
ident = vim.vm.customization.LinuxPrep()
ident.hostName = vim.vm.customization.FixedName()
ident.hostName.name = name
globalip = vim.vm.customization.GlobalIPSettings()
if domain:
ident.domain = domain
customspec.identity = ident
if dns is not None or domain is not None:
if dns is not None:
globalip.dnsServerList = [dns]
# if dns2:
# globalip.dnsServerList.append(dns2)
if domain is not None:
globalip.dnsSuffixList = domain
customspec.globalIPSettings = globalip
adaptermaps = []
for index, net in enumerate(nets):
if isinstance(net, str) or (len(net) == 1 and 'name' in net):
if index == 0:
continue
# nicname = "eth%d" % index
ip = None
netmask = None
# noconf = None
# vips = []
elif isinstance(net, dict):
# nicname = net.get('nic', "eth%d" % index)
ip = net.get('ip')
netmask = next((e for e in [net.get('mask'), net.get('netmask')] if e is not None), None)
# noconf = net.get('noconf')
# vips = net.get('vips')
if ip is not None and netmask is not None and gateway is not None and domain is not None:
guestmap = vim.vm.customization.AdapterMapping()
guestmap.adapter = vim.vm.customization.IPSettings()
guestmap.adapter.ip = vim.vm.customization.FixedIp()
guestmap.adapter.ip.ipAddress = ip
guestmap.adapter.subnetMask = netmask
guestmap.adapter.gateway = gateway
guestmap.adapter.dnsDomain = domain
adaptermaps.append(guestmap)
customspec.nicSettingMap = adaptermaps
return customspec
def createnicspec(nicname, netname, nictype=None):
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if nictype == 'pcnet32':
nic = vim.vm.device.VirtualPCNet32()
elif nictype == 'e1000':
nic = vim.vm.device.VirtualE1000()
elif nictype == 'e1000e':
nic = vim.vm.device.VirtualE1000e()
else:
nic = vim.vm.device.VirtualVmxnet3()
desc = vim.Description()
desc.label = nicname
nicbacking = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
desc.summary = netname
nicbacking.deviceName = netname
nic.backing = nicbacking
# nic.key = 0
nic.deviceInfo = desc
nic.addressType = 'generated'
nicspec.device = nic
return nicspec
def createdvsnicspec(nicname, netname, switchuuid, portgroupkey, nictype=None):
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if nictype == 'pcnet32':
nic = vim.vm.device.VirtualPCNet32()
elif nictype == 'e1000':
nic = vim.vm.device.VirtualE1000()
elif nictype == 'e1000e':
nic = vim.vm.device.VirtualE1000e()
else:
nic = vim.vm.device.VirtualVmxnet3()
dnicbacking = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
dvconnection = vim.dvs.DistributedVirtualSwitchPortConnection()
dvconnection.switchUuid = switchuuid
dvconnection.portgroupKey = portgroupkey
dnicbacking.port = dvconnection
nic.backing = dnicbacking
nicspec.device = nic
return nicspec
def createscsispec():
ckey = 1000
# SCSISPEC
scsispec = vim.vm.device.VirtualDeviceSpec()
scsispec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
# scsictrl = vim.vm.device.VirtualLsiLogicController()
scsictrl = vim.vm.device.ParaVirtualSCSIController()
scsictrl.key = ckey
scsictrl.busNumber = 0
scsictrl.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.noSharing
scsispec.device = scsictrl
return scsispec
def creatediskspec(number, disksize, ds, diskmode, thin=False):
ckey = 1000
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
vd = vim.vm.device.VirtualDisk()
vd.capacityInKB = disksize
diskspec.device = vd
vd.unitNumber = number
vd.controllerKey = ckey
diskfilebacking = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
filename = "[" + ds.name + "]"
diskfilebacking.fileName = filename
diskfilebacking.diskMode = diskmode
diskfilebacking.thinProvisioned = True if thin else False
vd.backing = diskfilebacking
return diskspec
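# Note: disksize is expressed in KB here; callers convert from GB with
# disksize * 1048576 (e.g. a 10 GB disk becomes capacityInKB = 10485760).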
def createcdspec():
# http://books.google.es/books?id=SdsnGmhF0QEC&pg=PA145&lpg=PA145&dq=VirtualCdrom%2Bspec&source=bl&ots=s8O2mw437-&sig=JpEo-AqmDV42b3fxpTcCt4xknEA&hl=es&sa=X&ei=KgGfT_DqApOy8QOl07X6Dg&redir_esc=y#v=onepage&q=VirtualCdrom%2Bspec&f=false
cdspec = vim.vm.device.VirtualDeviceSpec()
cdspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cd = vim.vm.device.VirtualCdrom()
cdbacking = vim.vm.device.VirtualCdrom.AtapiBackingInfo()
cd.backing = cdbacking
cd.controllerKey = 201
cd.unitNumber = 0
cd.key = -1
cdspec.device = cd
return cdspec
def createisospec(iso=None):
cdspec = vim.vm.device.VirtualDeviceSpec()
cdspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
connect = vim.vm.device.VirtualDevice.ConnectInfo()
connect.startConnected = True
connect.allowGuestControl = True
connect.connected = False
cd = vim.vm.device.VirtualCdrom()
cd.connectable = connect
cdbacking = vim.vm.device.VirtualCdrom.IsoBackingInfo()
if iso is not None:
cdbacking.fileName = iso
cd.backing = cdbacking
cd.controllerKey = 201
cd.unitNumber = 0
cd.key = -1
cdspec.device = cd
return cdspec
def createclonespec(pool):
clonespec = vim.vm.CloneSpec()
relocatespec = vim.vm.RelocateSpec()
relocatespec.pool = pool
clonespec.location = relocatespec
clonespec.powerOn = False
clonespec.template = False
return clonespec
def create_filter_spec(pc, vms):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = ['config.extraConfig.plan']
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(results):
vms = []
for o in results.objects:
if o.propSet[0].val is not None:
vms.append(o.obj)
return vms
def changecd(si, vm, iso):
virtual_cdrom_device = None
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualCdrom):
virtual_cdrom_device = dev
cdromspec = vim.vm.device.VirtualDeviceSpec()
cdromspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdromspec.device = vim.vm.device.VirtualCdrom()
cdromspec.device.controllerKey = virtual_cdrom_device.controllerKey
cdromspec.device.key = virtual_cdrom_device.key
cdromspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdromspec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
cdromspec.device.backing.fileName = iso
cdromspec.device.connectable.connected = True
cdromspec.device.connectable.startConnected = True
cdromspec.device.connectable.allowGuestControl = True
dev_changes = []
dev_changes.append(cdromspec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = dev_changes
task = vm.ReconfigVM_Task(spec=spec)
return task
raise RuntimeError("No cdrom found")
def createfolder(si, parentfolder, folder):
if find(si, parentfolder, vim.Folder, folder) is None:
parentfolder.CreateFolder(folder)
return None
def deletefolder(si, parentfolder, folder):
folder = find(si, parentfolder, vim.Folder, folder)
if folder is not None:
folder.Destroy()
def deletedirectory(si, dc, path):
d = si.content.fileManager.DeleteFile(path, dc)
waitForMe(d)
def keep_lease_alive(lease):
while(True):
time.sleep(5)
try:
lease.HttpNfcLeaseProgress(50)
if (lease.state == vim.HttpNfcLease.State.done):
return
except:
return
class Ksphere:
def __init__(self, host, user, password, datacenter, cluster, debug=False, isofolder=None,
filtervms=False, filteruser=False, filtertag=None):
# 4-1-CONNECT
si = connect.SmartConnect(host=host, port=443, user=user, pwd=password, sslContext=_create_unverified_context())
self.conn = si
self.si = si
self.vcip = host
self.url = "https://%s:%s@%s/sdk" % (user, password, host)
self.user = user
self.password = password
self.rootFolder = si.content.rootFolder
self.dc = find(si, self.rootFolder, vim.Datacenter, datacenter)
self.macaddr = []
self.clu = cluster
self.isofolder = isofolder
self.filtervms = filtervms
self.filteruser = filteruser
self.filtertag = filtertag
self.debug = debug
portgs = {}
o = si.content.viewManager.CreateContainerView(self.rootFolder, [vim.DistributedVirtualSwitch], True)
dvnetworks = o.view
o.Destroy()
for dvnetw in dvnetworks:
uuid = dvnetw.uuid
for portg in dvnetw.portgroup:
portgs[portg.name] = [uuid, portg.key]
self.portgs = portgs
return
def close(self):
self.si.content.sessionManager.Logout()
def exists(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
return True if vm is not None else False
def net_exists(self, name):
print("not implemented")
return
def create(self, name, virttype=None, profile='kvirt', flavor=None, plan='kvirt', cpumodel='host-model',
cpuflags=[], cpupinning=[], numcpus=2, memory=512, guestid='centos7_64Guest', pool='default', image=None,
disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
files=[], enableroot=True, overrides={}, tags=[], storemetadata=False, sharedfolders=[],
kernel=None, initrd=None, cmdline=None, placement=[], autostart=False, cpuhotplug=False,
memoryhotplug=False, numamode=None, numa=[], pcidevices=[], tpm=False, rng=False, metadata={},
securitygroups=[]):
dc = self.dc
vmFolder = dc.vmFolder
diskmode = 'persistent'
default_diskinterface = diskinterface
default_diskthin = diskthin
default_disksize = disksize
default_pool = pool
memory = int(memory)
numcpus = int(numcpus)
si = self.si
rootFolder = self.rootFolder
if plan != 'kvirt':
createfolder(si, dc.vmFolder, plan)
vmfolder = find(si, dc.vmFolder, vim.Folder, plan)
else:
vmfolder = dc.vmFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
resourcepool = clu.resourcePool
if image is not None:
rootFolder = self.rootFolder
imageobj = findvm(si, rootFolder, image)
if imageobj is None:
return {'result': 'failure', 'reason': "Image %s not found" % image}
clonespec = createclonespec(resourcepool)
confspec = vim.vm.ConfigSpec()
confspec.annotation = name
confspec.memoryMB = memory
confspec.numCPUs = numcpus
planopt = vim.option.OptionValue()
planopt.key = 'plan'
planopt.value = plan
profileopt = vim.option.OptionValue()
profileopt.key = 'profile'
profileopt.value = profile
imageopt = vim.option.OptionValue()
imageopt.key = 'image'
imageopt.value = image
extraconfig = [imageopt, planopt, profileopt]
clonespec.config = confspec
clonespec.powerOn = False
cloudinitiso = None
if cloudinit:
if image is not None and common.needs_ignition(image):
version = common.ignition_version(image)
ignitiondata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
domain=domain, reserveip=reserveip, files=files,
enableroot=enableroot, overrides=overrides, version=version,
plan=plan, image=image)
ignitionopt = vim.option.OptionValue()
ignitionopt.key = 'guestinfo.ignition.config.data'
ignitionopt.value = base64.b64encode(ignitiondata.encode()).decode()
encodingopt = vim.option.OptionValue()
encodingopt.key = 'guestinfo.ignition.config.data.encoding'
encodingopt.value = 'base64'
extraconfig.extend([ignitionopt, encodingopt])
else:
gcmds = []
if image is not None and 'cos' not in image and 'fedora-coreos' not in image:
lower = image.lower()
if lower.startswith('fedora') or lower.startswith('rhel') or lower.startswith('centos'):
gcmds.append('yum -y install open-vm-tools')
elif lower.startswith('debian') or [x for x in UBUNTUS if x in lower] or 'ubuntu' in lower:
gcmds.append('apt-get update')
gcmds.append('apt-get -f install open-vm-tools')
gcmds.append('systemctl enable --now vmtoolsd')
index = 0
if image is not None and image.startswith('rhel'):
subindex = [i for i, value in enumerate(cmds) if value.startswith('subscription-manager')]
if subindex:
index = subindex.pop() + 1
cmds = cmds[:index] + gcmds + cmds[index:]
# customspec = makecuspec(name, nets=nets, gateway=gateway, dns=dns, domain=domain)
# clonespec.customization = customspec
isofolder = self.isofolder if self.isofolder is not None else "[%s]/%s" % (default_pool, name)
cloudinitiso = "%s/%s.ISO" % (isofolder, name)
userdata, metadata, netdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets,
gateway=gateway, dns=dns, domain=domain,
reserveip=reserveip, files=files,
enableroot=enableroot, overrides=overrides,
storemetadata=storemetadata, machine='vsphere',
image=image)
confspec.extraConfig = extraconfig
t = imageobj.CloneVM_Task(folder=vmfolder, name=name, spec=clonespec)
waitForMe(t)
if cloudinitiso is not None:
with TemporaryDirectory() as tmpdir:
common.make_iso(name, tmpdir, userdata, metadata, netdata)
cloudinitisofile = "%s/%s.ISO" % (tmpdir, name)
if self.isofolder is not None:
isofolder = self.isofolder.split('/')
isopool = re.sub(r"[\[\]]", '', isofolder[0])
isofolder = isofolder[1]
else:
isopool = default_pool
isofolder = None
self._uploadimage(isopool, cloudinitisofile, name, isofolder=isofolder)
vm = findvm(si, vmFolder, name)
c = changecd(self.si, vm, cloudinitiso)
waitForMe(c)
datastores = {}
confspec = vim.vm.ConfigSpec()
confspec.name = name
confspec.annotation = name
confspec.memoryMB = memory
confspec.numCPUs = numcpus
confspec.extraConfig = []
for entry in [field for field in metadata if field in METADATA_FIELDS]:
opt = vim.option.OptionValue()
opt.key = entry
opt.value = metadata[entry]
confspec.extraConfig.append(opt)
if nested:
confspec.nestedHVEnabled = True
confspec.guestId = 'centos7_64Guest'
vmfi = vim.vm.FileInfo()
filename = "[" + default_pool + "]"
vmfi.vmPathName = filename
confspec.files = vmfi
if vnc:
vncport = random.randint(5900, 7000)
opt1 = vim.option.OptionValue()
opt1.key = 'RemoteDisplay.vnc.port'
opt1.value = vncport
opt2 = vim.option.OptionValue()
opt2.key = 'RemoteDisplay.vnc.enabled'
opt2.value = "TRUE"
confspec.extraConfig = [opt1, opt2]
if image is None:
t = vmfolder.CreateVM_Task(confspec, resourcepool)
waitForMe(t)
vm = find(si, dc.vmFolder, vim.VirtualMachine, name)
currentdevices = vm.config.hardware.device
currentdisks = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualDisk)]
currentnics = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualEthernetCard)]
confspec = vim.vm.ConfigSpec()
devconfspec = []
for index, disk in enumerate(disks):
if disk is None:
disksize = default_disksize
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, int):
disksize = disk
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, str) and disk.isdigit():
disksize = int(disk)
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, dict):
disksize = disk.get('size', default_disksize)
diskthin = disk.get('thin', default_diskthin)
diskinterface = disk.get('interface', default_diskinterface)
diskpool = disk.get('pool', default_pool)
if index < len(currentdisks) and image is not None:
currentdisk = currentdisks[index]
currentsize = convert(1000 * currentdisk.capacityInKB, GB=False)
if int(currentsize) < disksize:
pprint("Waiting for image disk %s to be resized" % index)
currentdisk.capacityInKB = disksize * 1048576
diskspec = vim.vm.device.VirtualDeviceSpec(device=currentdisk, operation="edit")
devconfspec.append(diskspec)
continue
disksize = disksize * 1048576
if diskpool not in datastores:
datastore = find(si, rootFolder, vim.Datastore, diskpool)
if not datastore:
return {'result': 'failure', 'reason': "Pool %s not found" % diskpool}
else:
datastores[diskpool] = datastore
if index == 0:
scsispec = createscsispec()
devconfspec.append(scsispec)
diskspec = creatediskspec(index, disksize, datastore, diskmode, diskthin)
devconfspec.append(diskspec)
# NICSPEC
for index, net in enumerate(nets):
netname = net['name'] if isinstance(net, dict) else net
if netname == 'default':
netname = 'VM Network'
if index < len(currentnics):
currentnic = currentnics[index]
try:
currentnetwork = currentnic.backing.deviceName
except:
currentswitchuuid = currentnic.backing.port.switchUuid
currentportgroupkey = currentnic.backing.port.portgroupKey
for dvsnet in self.portgs:
if self.portgs[dvsnet][0] == currentswitchuuid and\
self.portgs[dvsnet][1] == currentportgroupkey:
currentnetwork = dvsnet
if currentnetwork != netname:
if netname in self.portgs:
switchuuid = self.portgs[netname][0]
portgroupkey = self.portgs[netname][1]
currentnic.backing.port.switchUuid = switchuuid
currentnic.backing.port.portgroupKey = portgroupkey
nicspec = vim.vm.device.VirtualDeviceSpec(device=currentnic, operation="edit")
devconfspec.append(nicspec)
else:
currentnic.backing.deviceName = netname
nicspec = vim.vm.device.VirtualDeviceSpec(device=currentnic, operation="edit")
devconfspec.append(nicspec)
continue
nicname = 'Network Adapter %d' % (index + 1)
nictype = net['type'] if isinstance(net, dict) and 'type' in net else None
if netname in self.portgs:
switchuuid = self.portgs[netname][0]
portgroupkey = self.portgs[netname][1]
nicspec = createdvsnicspec(nicname, netname, switchuuid, portgroupkey, nictype=nictype)
else:
nicspec = createnicspec(nicname, netname, nictype=nictype)
devconfspec.append(nicspec)
if iso:
if '/' not in iso:
matchingisos = [i for i in self._getisos() if i.endswith(iso)]
if matchingisos:
iso = matchingisos[0]
else:
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
cdspec = createisospec(iso)
devconfspec.append(cdspec)
# bootoptions = vim.option.OptionValue(key='bios.bootDeviceClasses',value='allow:hd,cd,fd,net')
# confspec.bootOptions = vim.vm.BootOptions(bootOrder=[vim.vm.BootOptions.BootableCdromDevice()])
confspec.deviceChange = devconfspec
t = vm.Reconfigure(confspec)
waitForMe(t)
if start:
t = vm.PowerOnVM_Task(None)
waitForMe(t)
return {'result': 'success'}
def start(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOff":
t = vm.PowerOnVM_Task(None)
waitForMe(t)
return {'result': 'success'}
def stop(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
return {'result': 'success'}
def status(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
return vm.runtime.powerState if vm is not None else ''
def delete(self, name, snapshots=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
plan, image = 'kvirt', None
vmpath = vm.summary.config.vmPathName.replace('/%s.vmx' % name, '')
for entry in vm.config.extraConfig:
if entry.key == 'image':
image = entry.value
if entry.key == 'plan':
plan = entry.value
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
t = vm.Destroy_Task()
waitForMe(t)
if image is not None and 'coreos' not in image and 'rhcos' not in image and\
'fcos' not in image and vmpath.endswith(name):
isopath = "%s/%s.ISO" % (self.isofolder, name) if self.isofolder is not None else vmpath
deletedirectory(si, dc, isopath)
if plan != 'kvirt':
planfolder = find(si, vmFolder, vim.Folder, plan)
if planfolder is not None and len(planfolder.childEntity) == 0:
planfolder.Destroy()
return {'result': 'success'}
def console(self, name, tunnel=False, web=False):
si = self.si
dc = self.dc
vcip = self.vcip
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
print("VM %s not found" % name)
return
elif vm.runtime.powerState == "poweredOff":
print("VM down")
return
extraconfig = vm.config.extraConfig
vncfound = False
for extra in extraconfig:
key, value = extra.key, extra.value
if 'vnc' in key and 'port' in key:
vncfound = True
vncport = value
break
else:
continue
if vncfound:
host = vm.runtime.host.name
url = "vnc://%s:%s" % (host, vncport)
consolecommand = "remote-viewer %s &" % (url)
if web:
return url
if self.debug or os.path.exists("/i_am_a_container"):
print(consolecommand)
if not os.path.exists("/i_am_a_container"):
os.popen(consolecommand)
else:
content = si.RetrieveContent()
sgid = content.about.instanceUuid
cert = get_server_certificate((self.vcip, 443))
cert_deserialize = x509.load_pem_x509_certificate(cert.encode(), default_backend())
finger_print = hexlify(cert_deserialize.fingerprint(hashes.SHA1())).decode('utf-8')
sha1 = ":".join([finger_print[i: i + 2] for i in range(0, len(finger_print), 2)])
vcenter_data = content.setting
vcenter_settings = vcenter_data.setting
for item in vcenter_settings:
key = getattr(item, 'key')
if key == 'VirtualCenter.FQDN':
fqdn = getattr(item, 'value')
sessionmanager = si.content.sessionManager
session = sessionmanager.AcquireCloneTicket()
vmid = vm._moId
vmurl = "https://%s/ui/webconsole.html?" % vcip
vmurl += "vmId=%s&vmName=%s&serverGuid=%s&host=%s&sessionTicket=%s&thumbprint=%s" % (vmid, name, sgid, fqdn,
session, sha1)
if web:
return vmurl
if self.debug or os.path.exists("/i_am_a_container"):
msg = "Open the following url:\n%s" % vmurl if os.path.exists("/i_am_a_container") else vmurl
pprint(msg)
else:
pprint("Opening url %s" % vmurl)
webbrowser.open(vmurl, new=2, autoraise=True)
def info(self, name, output='plain', fields=[], values=False, vm=None, debug=False):
translation = {'poweredOff': 'down', 'poweredOn': 'up', 'suspended': 'suspended'}
yamlinfo = {}
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
if vm is None:
vm = findvm(si, vmFolder, name)
if vm is None:
error("VM %s not found" % name)
return {}
summary = vm.summary
yamlinfo['name'] = name
yamlinfo['id'] = summary.config.instanceUuid
yamlinfo['cpus'] = vm.config.hardware.numCPU
yamlinfo['memory'] = vm.config.hardware.memoryMB
yamlinfo['status'] = translation[vm.runtime.powerState]
yamlinfo['nets'] = []
yamlinfo['disks'] = []
devices = vm.config.hardware.device
mainmac = None
for number, dev in enumerate(devices):
if "addressType" in dir(dev):
try:
network = dev.backing.deviceName
except:
switchuuid = dev.backing.port.switchUuid
portgroupkey = dev.backing.port.portgroupKey
for dvsnet in self.portgs:
if self.portgs[dvsnet][0] == switchuuid and self.portgs[dvsnet][1] == portgroupkey:
network = dvsnet
device = dev.deviceInfo.label
devicename = type(dev).__name__.replace('vim.vm.device.Virtual', '').lower()
networktype = devicename
mac = dev.macAddress
if mainmac is None:
mainmac = mac
net = {'device': device, 'mac': mac, 'net': network, 'type': networktype}
yamlinfo['nets'].append(net)
if type(dev).__name__ == 'vim.vm.device.VirtualDisk':
device = "disk%s" % dev.unitNumber
disksize = convert(1000 * dev.capacityInKB, GB=False)
diskformat = dev.backing.diskMode
drivertype = 'thin' if dev.backing.thinProvisioned else 'thick'
path = dev.backing.datastore.name
disk = {'device': device, 'size': int(disksize), 'format': diskformat, 'type': drivertype, 'path': path}
yamlinfo['disks'].append(disk)
if vm.runtime.powerState == "poweredOn":
yamlinfo['host'] = vm.runtime.host.name
for nic in vm.guest.net:
currentmac = nic.macAddress
currentips = nic.ipAddress
if currentmac == mainmac and currentips:
yamlinfo['ip'] = currentips[0]
for entry in vm.config.extraConfig:
if entry.key in METADATA_FIELDS:
yamlinfo[entry.key] = entry.value
if entry.key == 'image':
yamlinfo['user'] = common.get_user(entry.value)
if debug:
yamlinfo['debug'] = vm.config.extraConfig
return yamlinfo
def list(self):
rootFolder = self.rootFolder
si = self.si
vms = []
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.VirtualMachine], True)
vmlist = collectproperties(si, view=view, objtype=vim.VirtualMachine, pathset=['name'], includemors=True)
for o in vmlist:
vm = o['obj']
if vm.summary.runtime.connectionState != 'orphaned' and not vm.config.template:
if self.filtervms and 'plan' not in [x.key for x in vm.config.extraConfig]:
continue
vms.append(self.info(o['name'], vm=vm))
return sorted(vms, key=lambda x: x['name'])
def list_pools(self):
pools = []
rootFolder = self.rootFolder
si = self.si
# dc = self.dc
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
for dts in clu.datastore:
pools.append(dts.name)
# datastorename = dts.name
# total = dssize(dts)[0].replace('GB', '')
# available = dssize(dts)[1].replace('GB', '')
# results[datastorename] = [float(total), float(available), dc.name]
return pools
def beststorage(self):
rootFolder = self.rootFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
bestds = ''
bestsize = 0
for dts in clu.datastore:
datastorename = dts.name
available = float(dssize(dts)[1].replace('GB', ''))
if available > bestsize:
bestsize = available
bestds = datastorename
return bestds
def _getisos(self):
rootFolder = self.rootFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
isos = []
results = {}
searchspec = vim.host.DatastoreBrowser.SearchSpec()
filequery = [vim.host.DatastoreBrowser.IsoImageQuery(), vim.host.DatastoreBrowser.FolderQuery()]
filequeryflags = vim.host.DatastoreBrowser.FileInfo.Details()
filequeryflags.fileSize = True
filequeryflags.modification = False
filequeryflags.fileOwner = False
filequeryflags.fileType = False
searchspec.query = filequery
searchspec.details = filequeryflags
searchspec.sortFoldersFirst = True
searchspec.searchCaseInsensitive = True
for dts in clu.datastore:
datastorename = dts.name
datastorepath = "[" + datastorename + "]"
browser = dts.browser
t = browser.SearchDatastore_Task(datastorepath, searchspec)
waitForMe(t)
result = t.info.result
fileinfo = result.file
for element in fileinfo:
folderpath = element.path
if not folderpath.endswith('iso') and 'ISO' in folderpath.upper():
t = browser.SearchDatastoreSubFolders_Task("%s%s" % (datastorepath, folderpath), searchspec)
waitForMe(t)
results = t.info.result
for r in results:
fileinfo = r.file
for isofile in fileinfo:
path = isofile.path
if path.endswith('.iso'):
isos.append("%s/%s/%s" % (datastorepath, folderpath, path))
return isos
def volumes(self, iso=False):
if iso:
return self._getisos()
si = self.si
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.VirtualMachine], True)
vmlist = o.view
o.Destroy()
return [v.name for v
in vmlist if v.config.template and v.summary is not
None and v.summary.runtime.connectionState != 'orphaned']
def update_metadata(self, name, metatype, metavalue, append=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
configspec = vim.vm.ConfigSpec()
opt = vim.option.OptionValue()
opt.key = metatype
opt.value = metavalue
configspec.extraConfig = [opt]
t = vm.ReconfigVM_Task(configspec)
waitForMe(t)
def update_memory(self, name, memory):
print("not implemented")
return
def update_cpus(self, name, numcpus):
print("not implemented")
return
def update_start(self, name, start=True):
print("not implemented")
return
def update_information(self, name, information):
self.update_metadata(name, 'information', information)
return
def update_iso(self, name, iso):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
isos = [i for i in self._getisos() if i.endswith(iso)]
if not isos:
error("Iso %s not found.Leaving..." % iso)
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
else:
iso = isos[0]
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
c = changecd(self.si, vm, iso)
waitForMe(c)
return {'result': 'success'}
def dnsinfo(self, name):
return None, None
def _uploadimage(self, pool, origin, directory, isofolder=None):
si = self.si
rootFolder = self.rootFolder
datastore = find(si, rootFolder, vim.Datastore, pool)
if not datastore:
return {'result': 'failure', 'reason': "Pool %s not found" % pool}
destination = os.path.basename(origin)
if isofolder is not None:
directory = isofolder
url = "https://%s:443/folder/%s/%s?dcPath=%s&dsName=%s" % (self.vcip, directory, destination, self.dc.name,
pool)
client_cookie = si._stub.cookie
cookie_name = client_cookie.split("=", 1)[0]
cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[1].split(";", 1)[0].lstrip()
cookie_text = " " + cookie_value + "; $" + cookie_path
cookie = {cookie_name: cookie_text}
headers = {'Content-Type': 'application/octet-stream'}
with open(origin, "rb") as f:
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
try:
r = requests.put(url, data=f, headers=headers, cookies=cookie, verify=False)
except:
url = url.replace('/folder', '')
r = requests.put(url, data=f, headers=headers, cookies=cookie, verify=False)
if r.status_code not in [200, 201]:
error("Got status %s with reason: %s" % (r.status_code, r.reason))
def get_pool_path(self, pool):
return pool
def add_disk(self, name, size=1, pool=None, thin=True, image=None, shareable=False, existing=None,
interface='virtio', novm=False, overrides={}):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
spec = vim.vm.ConfigSpec()
unit_number = 0
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
unit_number = int(dev.unitNumber) + 1
if unit_number == 7:
unit_number = 8
if isinstance(dev, vim.vm.device.VirtualSCSIController):
controller = dev
new_disk_kb = int(size) * 1024 * 1024
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
disk_spec.device.backing.thinProvisioned = thin
disk_spec.device.backing.diskMode = 'persistent'
disk_spec.device.unitNumber = unit_number
disk_spec.device.capacityInKB = new_disk_kb
disk_spec.device.controllerKey = controller.key
dev_changes = [disk_spec]
spec.deviceChange = dev_changes
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
def delete_disk(self, name=None, diskname=None, pool=None, novm=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) and dev.deviceInfo.label == diskname:
devspec = vim.vm.device.VirtualDeviceSpec()
devspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
devspec.device = dev
spec = vim.vm.ConfigSpec()
spec.deviceChange = [devspec]
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
return {'result': 'failure', 'reason': "Disk %s not found in %s" % (diskname, name)}
def add_nic(self, name, network):
if network == 'default':
network = 'VM Network'
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
spec = vim.vm.ConfigSpec()
nicnumber = len([dev for dev in vm.config.hardware.device if "addressType" in dir(dev)])
nicname = 'Network adapter %d' % (nicnumber + 1)
nicspec = createnicspec(nicname, network)
nic_changes = [nicspec]
spec.deviceChange = nic_changes
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
def delete_nic(self, name, interface):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualEthernetCard) and dev.deviceInfo.label == interface:
devspec = vim.vm.device.VirtualDeviceSpec()
devspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
devspec.device = dev
spec = vim.vm.ConfigSpec()
spec.deviceChange = [devspec]
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
return {'result': 'failure', 'reason': "Nic %s not found in %s" % (interface, name)}
def list_networks(self):
si = self.si
rootFolder = si.content.rootFolder
networks = {}
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
dvslist = collectproperties(si, view=view, objtype=vim.dvs.DistributedVirtualPortgroup, pathset=['name'],
includemors=True)
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.Network], True)
netlist = collectproperties(si, view=view, objtype=vim.Network, pathset=['name'], includemors=True)
for o in netlist:
network = o['obj']
cidr, dhcp, domainname = '', '', ''
mode = 'accessible' if network.summary.accessible else 'notaccessible'
networks[network.name] = {'cidr': cidr, 'dhcp': dhcp, 'domain': domainname, 'type': 'routed', 'mode': mode}
for o in dvslist:
network = o['obj']
cidr, dhcp, domainname, mode = '', '', '', ''
networks[network.name] = {'cidr': cidr, 'dhcp': dhcp, 'domain': domainname, 'type': 'routed', 'mode': mode}
return networks
def create_network(self, name, cidr=None, dhcp=True, nat=True, domain=None, plan='kvirt', overrides={}):
si = self.si
cluster = find(si, self.rootFolder, vim.ComputeResource, self.clu)
networkFolder = self.dc.networkFolder
rootFolder = self.rootFolder
net = find(si, rootFolder, vim.Network, name)
if net is not None:
return {'result': 'failure', 'reason': "Network %s already there" % name}
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.DistributedVirtualSwitch], True)
dvnetworks = o.view
o.Destroy()
for dvnetw in dvnetworks:
for portg in dvnetw.portgroup:
if portg.name == name:
return {'result': 'failure', 'reason': "Network %s already there" % name}
if overrides.get('distributed', False):
pnic_specs = []
dvs_host_configs = []
uplink_port_names = []
dvs_create_spec = vim.DistributedVirtualSwitch.CreateSpec()
dvs_config_spec = vim.DistributedVirtualSwitch.ConfigSpec()
dvs_config_spec.name = name
dvs_config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
for x in range(len(cluster.host)):
uplink_port_names.append("dvUplink%d" % x)
for host in cluster.host:
dvs_config_spec.uplinkPortPolicy.uplinkPortName = uplink_port_names
dvs_config_spec.maxPorts = 2000
pnic_spec = vim.dvs.HostMember.PnicSpec()
pnic_spec.pnicDevice = 'vmnic1'
pnic_specs.append(pnic_spec)
dvs_host_config = vim.dvs.HostMember.ConfigSpec()
dvs_host_config.operation = vim.ConfigSpecOperation.add
dvs_host_config.host = host
dvs_host_configs.append(dvs_host_config)
dvs_host_config.backing = vim.dvs.HostMember.PnicBacking()
dvs_host_config.backing.pnicSpec = pnic_specs
dvs_config_spec.host = dvs_host_configs
dvs_create_spec.configSpec = dvs_config_spec
dvs_create_spec.productInfo = vim.dvs.ProductSpec(version='5.1.0')
networkFolder.CreateDVS_Task(dvs_create_spec)
else:
return {'result': 'failure', 'reason': "Not implemented yet for non dvs networks"}
return {'result': 'success'}
def delete_network(self, name=None, cidr=None):
si = self.si
rootFolder = self.rootFolder
try:
net = find(si, rootFolder, vim.dvs.DistributedVirtualPortgroup, name)
net.Destroy()
except:
try:
net = find(si, rootFolder, vim.Network, name)
net.Destroy()
except:
return {'result': 'failure', 'reason': "Network %s not found" % name}
return {'result': 'success'}
def vm_ports(self, name):
return []
def add_image(self, url, pool, short=None, cmd=None, name=None, size=None):
si = self.si
rootFolder = self.rootFolder
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
resourcepool = clu.resourcePool
vmFolder = self.dc.vmFolder
manager = si.content.ovfManager
shortimage = os.path.basename(url).split('?')[0]
if not shortimage.endswith('ova') and not shortimage.endswith('zip') and find_executable('qemu-img') is None:
msg = "qemu-img is required for conversion"
error(msg)
return {'result': 'failure', 'reason': msg}
if name is None:
name = shortimage.replace('.ova', '').replace('.x86_64', '')
if shortimage in self.volumes():
pprint("Template %s already there" % shortimage)
return {'result': 'success'}
if not find(si, rootFolder, vim.Datastore, pool):
return {'result': 'failure', 'reason': "Pool %s not found" % pool}
if not os.path.exists('/tmp/%s' % shortimage):
pprint("Downloading locally %s" % shortimage)
downloadcmd = "curl -Lo /tmp/%s -f '%s'" % (shortimage, url)
code = os.system(downloadcmd)
if code != 0:
return {'result': 'failure', 'reason': "Unable to download indicated image"}
else:
pprint("Using found /tmp/%s" % shortimage)
vmdk_path = None
ovf_path = None
if url.endswith('zip'):
with ZipFile("/tmp/%s" % shortimage) as zipf:
for _fil in zipf.namelist():
if _fil.endswith('vmdk'):
vmdk_path = '/tmp/%s' % _fil
elif _fil.endswith('ovf'):
ovf_path = '/tmp/%s' % _fil
if vmdk_path is None or ovf_path is None:
return {'result': 'failure', 'reason': "Incorrect ova file"}
zipf.extractall('/tmp')
elif url.endswith('ova'):
with tarfile.open("/tmp/%s" % shortimage) as tar:
for _fil in [x.name for x in tar.getmembers()]:
if _fil.endswith('vmdk'):
vmdk_path = '/tmp/%s' % _fil
elif _fil.endswith('ovf'):
ovf_path = '/tmp/%s' % _fil
if vmdk_path is None or ovf_path is None:
return {'result': 'failure', 'reason': "Incorrect ova file"}
tar.extractall('/tmp')
else:
extension = os.path.splitext(shortimage)[1].replace('.', '')
vmdk_path = "/tmp/%s" % shortimage.replace(extension, 'vmdk')
if not os.path.exists(vmdk_path):
pprint("Converting qcow2 file to vmdk")
os.popen("qemu-img convert -O vmdk -o subformat=streamOptimized /tmp/%s %s" % (shortimage, vmdk_path))
ovf_path = "/tmp/%s" % shortimage.replace(extension, 'ovf')
commondir = os.path.dirname(common.pprint.__code__.co_filename)
time.sleep(5)
vmdk_info = json.loads(os.popen("qemu-img info %s --output json" % vmdk_path).read())
virtual_size = vmdk_info['virtual-size']
actual_size = vmdk_info['actual-size']
ovfcontent = open("%s/vm.ovf.j2" % commondir).read().format(name=shortimage, virtual_size=virtual_size,
actual_size=actual_size)
with open(ovf_path, 'w') as f:
f.write(ovfcontent)
ovfd = open(ovf_path).read()
ovfd = re.sub('<Name>.*</Name>', '<Name>%s</Name>' % name, ovfd)
datastore = find(si, rootFolder, vim.Datastore, pool)
network = find(si, rootFolder, vim.Network, 'VM Network')
networkmapping = vim.OvfManager.NetworkMapping.Array()
nm = vim.OvfManager.NetworkMapping(name="VM Network", network=network)
networkmapping.append(nm)
spec_params = vim.OvfManager.CreateImportSpecParams(diskProvisioning="thin", networkMapping=networkmapping)
import_spec = manager.CreateImportSpec(ovfd, resourcepool, datastore, spec_params)
lease = resourcepool.ImportVApp(import_spec.importSpec, vmFolder)
time.sleep(10)
while True:
if lease.state == vim.HttpNfcLease.State.ready:
pprint("Uploading vmdk")
host = self._getfirshost()
url = lease.info.deviceUrl[0].url.replace('*', host.name)
keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
keepalive_thread.start()
curl_cmd = (
"curl -Ss -X POST --insecure -T %s -H 'Content-Type: \
application/x-vnd.vmware-streamVmdk' %s" % (vmdk_path, url))
os.system(curl_cmd)
lease.HttpNfcLeaseComplete()
keepalive_thread.join()
self.export(name)
os.remove('/tmp/%s' % shortimage)
os.remove(ovf_path)
os.remove(vmdk_path)
return {'result': 'success'}
elif lease.state == vim.HttpNfcLease.State.error:
error("Lease error: %s" % lease.error)
os._exit(1)
def _getfirshost(self):
si = self.si
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.HostSystem], True)
view = o.view
o.Destroy()
host = view[0] if view else None
return host
def report(self):
si = self.si
about = si.content.about
print("Host: %s" % self.vcip)
print("Datacenter: %s" % self.dc.name)
print("Version: %s" % about.version)
print("Api Version: %s" % about.apiVersion)
print("Datacenter: %s" % self.dc.name)
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.HostSystem], True)
view = o.view
o.Destroy()
for h in view:
print("Host: %s" % h.name)
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.ComputeResource], True)
view = o.view
o.Destroy()
for clu in view:
print("Cluster: %s" % clu.name)
for dts in clu.datastore:
print("Pool: %s" % dts.name)
def delete_image(self, image, pool=None):
si = self.si
vmFolder = self.dc.vmFolder
vm = findvm(si, vmFolder, image)
if vm is None or not vm.config.template:
return {'result': 'failure', 'reason': 'Image %s not found' % image}
else:
t = vm.Destroy_Task()
waitForMe(t)
return {'result': 'success'}
def export(self, name, image=None):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
vm.MarkAsTemplate()
if image is not None:
vm.Rename(image)
return {'result': 'success'}
def list_dns(self, domain):
return []
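# Usage sketch (hypothetical credentials; normally this provider is driven through kcli):
#   k = Ksphere('vcenter.example.com', 'administrator@vsphere.local', 'secret',
#               'MyDatacenter', 'MyCluster')
#   print(k.list_pools())
#   k.close()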
|
client.py
|
import socket
from tkinter import *
from threading import Thread
import random
from PIL import ImageTk, Image
screen_width = None
screen_height = None
SERVER = None
PORT = None
IP_ADDRESS = None
playerName = None
canvas1 = None
nameEntry = None
nameWindow = None
def saveName():
global SERVER
global playerName
global nameWindow
global nameEntry
playerName = nameEntry.get()
nameEntry.delete(0, END)
nameWindow.destroy()
SERVER.send(playerName.encode())
def askPlayerName():
global playerName
global nameEntry
global nameWindow
global canvas1
nameWindow = Tk()
nameWindow.title("Tambola Family Fun")
nameWindow.geometry('800x600')
screen_width = nameWindow.winfo_screenwidth()
screen_height = nameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas1 = Canvas( nameWindow, width = 500,height = 500)
canvas1.pack(fill = "both", expand = True)
# Display image
canvas1.create_image( 0, 0, image = bg, anchor = "nw")
canvas1.create_text( screen_width/4.5,screen_height/8, text = "Enter Name", font=("Chalkboard SE",60), fill="black")
nameEntry = Entry(nameWindow, width=15, justify='center', font=('Chalkboard SE', 30), bd=5, bg='white')
nameEntry.place(x = screen_width/7, y=screen_height/5.5 )
button = Button(nameWindow, text="Save", font=("Chalkboard SE", 30),width=11, command=saveName, height=2, bg="#80deea", bd=3)
button.place(x = screen_width/6, y=screen_height/4)
nameWindow.resizable(True, True)
nameWindow.mainloop()
def receivedMsg():
pass
def setup():
global SERVER
global PORT
global IP_ADDRESS
PORT = 6000
IP_ADDRESS = '127.0.0.1'
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.connect((IP_ADDRESS, PORT))
thread = Thread(target=recivedMsg)
thread.start()
askPlayerName()
setup()
|
mock_ms_client.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Mocked MindSpore debugger client."""
from threading import Thread
from time import sleep
import grpc
import numpy as np
from mindinsight.debugger.proto import ms_graph_pb2
from mindinsight.debugger.proto.debug_grpc_pb2 import Metadata, WatchpointHit, Chunk, EventReply
from mindinsight.debugger.proto.debug_grpc_pb2_grpc import EventListenerStub
from mindinsight.debugger.proto.ms_graph_pb2 import TensorProto, DataType
from tests.st.func.debugger.conftest import GRAPH_PROTO_FILE
class MockDebuggerClient:
"""Mocked Debugger client."""
def __init__(self, hostname='localhost:50051', backend='Ascend', graph_num=1, ms_version='1.1.0'):
channel = grpc.insecure_channel(hostname)
self.stub = EventListenerStub(channel)
self.flag = True
self._step = 0
self._watchpoint_id = 0
self._leaf_node = []
self._cur_node = ''
self._backend = backend
self._graph_num = graph_num
self._ms_version = ms_version
def _clean(self):
"""Clean cache."""
self._step = 0
self._watchpoint_id = 0
self._leaf_node = []
self._cur_node = ''
def get_thread_instance(self):
"""Get debugger client thread."""
return MockDebuggerClientThread(self)
def next_node(self, name=None):
"""Update the current node to next node."""
if not self._cur_node:
self._cur_node = self._leaf_node[0]
return
cur_index = self._leaf_node.index(self._cur_node)
# if name is not None, go to the specified node.
if not name:
next_index = cur_index + 1
else:
next_index = self._leaf_node.index(name)
# update step
if next_index <= cur_index or next_index == len(self._leaf_node):
self._step += 1
# update current node
if next_index == len(self._leaf_node):
self._cur_node = self._leaf_node[0]
else:
self._cur_node = self._leaf_node[next_index]
def command_loop(self):
"""Wait for the command."""
total_steps = 100
wait_flag = True
while self.flag and wait_flag:
if self._step > total_steps:
sleep(0.5)
self.send_metadata_cmd(training_done=True)
return
wait_flag = self._wait_cmd()
def _wait_cmd(self):
"""Wait for command and deal with command."""
metadata = self.get_metadata_cmd()
response = self.stub.WaitCMD(metadata)
assert response.status == EventReply.Status.OK
if response.HasField('run_cmd'):
self._deal_with_run_cmd(response)
elif response.HasField('view_cmd'):
for tensor in response.view_cmd.tensors:
self.send_tensor_cmd(in_tensor=tensor)
elif response.HasField('set_cmd'):
self._watchpoint_id += 1
elif response.HasField('exit'):
self._watchpoint_id = 0
self._step = 0
return False
return True
def _deal_with_run_cmd(self, response):
self._step += response.run_cmd.run_steps
if response.run_cmd.run_level == 'node':
self.next_node(response.run_cmd.node_name)
if self._watchpoint_id > 0:
self.send_watchpoint_hit()
def get_metadata_cmd(self, training_done=False):
"""Construct metadata message."""
metadata = Metadata()
metadata.device_name = '0'
metadata.cur_step = self._step
metadata.cur_node = self._cur_node
metadata.backend = self._backend
metadata.training_done = training_done
metadata.ms_version = self._ms_version
return metadata
def send_metadata_cmd(self, training_done=False):
"""Send metadata command."""
self._clean()
metadata = self.get_metadata_cmd(training_done)
response = self.stub.SendMetadata(metadata)
assert response.status == EventReply.Status.OK
if response.HasField('version_matched') and response.version_matched is False:
self.command_loop()
if training_done is False:
self.send_graph_cmd()
def send_graph_cmd(self):
"""Send graph to debugger server."""
self._step = 1
if self._graph_num > 1:
chunks = []
for i in range(self._graph_num):
chunks.extend(self._get_graph_chunks('graph_' + str(i)))
response = self.stub.SendMultiGraphs(self._generate_graph(chunks))
else:
chunks = self._get_graph_chunks()
response = self.stub.SendGraph(self._generate_graph(chunks))
assert response.status == EventReply.Status.OK
# go to command loop
self.command_loop()
def _get_graph_chunks(self, graph_name='graph_0'):
"""Get graph chunks."""
with open(GRAPH_PROTO_FILE, 'rb') as file_handle:
content = file_handle.read()
size = len(content)
graph = ms_graph_pb2.GraphProto()
graph.ParseFromString(content)
graph.name = graph_name
content = graph.SerializeToString()
self._leaf_node = [node.full_name for node in graph.node]
        # the max limit of grpc message size is 4MB
        # split the graph into 3MB chunks
chunk_size = 1024 * 1024 * 3
chunks = []
for index in range(0, size, chunk_size):
sub_size = min(chunk_size, size - index)
sub_chunk = Chunk(buffer=content[index: index + sub_size])
chunks.append(sub_chunk)
chunks[-1].finished = True
return chunks
@staticmethod
def _generate_graph(chunks):
"""Construct graph generator."""
for buffer in chunks:
yield buffer
def send_tensor_cmd(self, in_tensor=None):
"""Send tensor info with value."""
response = self.stub.SendTensors(self.generate_tensor(in_tensor))
assert response.status == EventReply.Status.OK
@staticmethod
def generate_tensor(in_tensor=None):
"""Generate tensor message."""
tensor_content = np.asarray([1, 2, 3, 4, 5, 6]).astype(np.float32).tobytes()
tensors = [TensorProto(), TensorProto()]
tensors[0].CopyFrom(in_tensor)
tensors[0].data_type = DataType.DT_FLOAT32
tensors[0].dims.extend([2, 3])
tensors[1].CopyFrom(tensors[0])
tensors[0].tensor_content = tensor_content[:12]
tensors[1].tensor_content = tensor_content[12:]
tensors[0].finished = 0
tensors[1].finished = 1
for sub_tensor in tensors:
yield sub_tensor
def send_watchpoint_hit(self):
"""Send watchpoint hit value."""
tensors = [TensorProto(node_name='Default/TransData-op99', slot='0'),
TensorProto(node_name='Default/optimizer-Momentum/ApplyMomentum-op25', slot='0')]
response = self.stub.SendWatchpointHits(self._generate_hits(tensors))
assert response.status == EventReply.Status.OK
@staticmethod
def _generate_hits(tensors):
"""Construct watchpoint hits."""
for tensor in tensors:
hit = WatchpointHit()
hit.id = 1
hit.tensor.CopyFrom(tensor)
yield hit
class MockDebuggerClientThread:
"""Mocked debugger client thread."""
def __init__(self, debugger_client):
self._debugger_client = debugger_client
self._debugger_client_thread = Thread(target=debugger_client.send_metadata_cmd)
def __enter__(self, backend='Ascend'):
self._debugger_client.flag = True
self._debugger_client_thread.start()
return self._debugger_client_thread
def __exit__(self, exc_type, exc_val, exc_tb):
self._debugger_client_thread.join(timeout=2)
self._debugger_client.flag = False
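# Editor's usage sketch (not part of the original test helper): how the mock
# client is normally driven. It assumes a debugger gRPC server is already
# listening on the given address and that GRAPH_PROTO_FILE is available.
if __name__ == '__main__':
    mock_client = MockDebuggerClient(hostname='localhost:50051', backend='Ascend')
    with mock_client.get_thread_instance():
        # The client thread streams metadata and the graph, then stays in its
        # command loop until the server replies with an exit command.
        pass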
|
handler.py
|
import threading
import weakref
import paramiko
import tornado
from tornado import gen
import tornado.web
import tornado.websocket
from concurrent.futures import Future
from tornado.ioloop import IOLoop
from tornado.iostream import _ERRNO_CONNRESET
from tornado.util import errno_from_exception
BUF_SIZE = 1024
worker_dic = {}
class IndexHandler(tornado.web.RequestHandler):
def initialize(self, loop):
print('i initialize')
self.loop = loop
def get(self):
print('i get')
self.render('index.html')
# self.render('test.html')
def get_port(self):
value = self.get_value('port')
try:
port = int(value)
except ValueError:
raise ValueError('Invalid port {}'.format(value))
if 0 < port < 65536:
return port
def get_value(self, name):
value = self.get_argument(name)
if not value:
raise ValueError('Empty {}'.format(name))
return value
def get_args(self):
hostname = self.get_value('hostname')
username = self.get_value('username')
password = self.get_value('password')
port = self.get_port()
args = (hostname, port, username, password)
print("args >>>",args)
return args
@gen.coroutine
def post(self):
print('i post')
worker_id = None
status = None
future = Future()
# t = threading.Thread(target=self.ssh_connect_wrapped, args=(future,))
t = threading.Thread(target=self.ssh_connect, args=(future,))
        t.setDaemon(True)  # daemon thread
t.start()
        print('thread id >> ', t.ident)
try:
            # Thanks to this yield, other requests can still be served while the thread above is working.
            # Like return, yield hands a result back, which is what keeps the IO non-blocking here.
            # The asynchrony rests on the Future: the worker thread talks to this handler through future.set_result().
worker = yield future # set_result(worker)
print('work_id >>> ', worker.id, type(worker))
except Exception as e:
status = str(e)
else:
            worker_id = worker.id  # unique id of the object returned by the yield
worker_dic[worker_id] = worker
        print('before call_later')
        self.loop.call_later(3, self.recycle_worker, worker)  # run recycle_worker once, 3 seconds from now
        print('after call_later')
self.write(dict(id=worker_id, status=status))
def ssh_connect(self, future):
print('i ssh_conn')
try:
            ssh_client = paramiko.SSHClient()  # create the client instance
            # ssh_client.load_system_host_keys()  # would load the known_hosts file
            ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # allow connecting to hosts that are not in known_hosts
args = self.get_args()
# ssh_client.connect('10.0.0.129', int(22), 'KFleader', '123456')
ssh_client.connect(*args, timeout=6)
            chan = ssh_client.invoke_shell(term='xterm')  # start an interactive shell session; term is the terminal type to emulate
            chan.setblocking(0)  # non-blocking
src_addr = self.get_client_addr()
worker = Worker(self.loop, ssh_client, chan, src_addr)
print("ssh ")
except Exception as e:
future.set_exception(e)
else:
            future.set_result(worker)  # set the result once done; the code after the yield resumes only when a value has been set
return worker
def get_client_addr(self):
print('i get_client')
        '''get the real client IP'''
ip = self.request.headers.get('X-Real-Ip')
port = self.request.headers.get('X-Real-Port')
if ip and port:
real_ip = (ip, port)
else:
real_ip = self.request.connection.stream.socket.getpeername()
return real_ip
def recycle_worker(self, worker):
print('i recycle')
if not worker.handler:
worker_dic.pop(worker.id, None)
worker.close()
class WsockHandler(tornado.websocket.WebSocketHandler):
def initialize(self, loop):
print('w initialize')
self.loop = loop
self.worker_ref = None
def get_client_addr(self):
print('w get_client_addr')
ip = self.request.headers.get('X-Real-Ip')
port = self.request.headers.get('X-Real-Port')
if ip and port:
real_ip = (ip, port)
else:
real_ip = self.stream.socket.getpeername()
return real_ip
def open(self):
print('w hello open')
current_src_addr = self.get_client_addr()
        current_id = self.get_argument('id')  # RequestHandler.get_argument returns the query value, e.g. ws?id=4384194064
        current_worker = worker_dic.get(current_id)
        if current_worker and current_worker.src_addr[0] == current_src_addr[0]:
worker_dic.pop(current_worker.id)
            self.set_nodelay(True)  # small messages are delayed by default (Nagle); disable that to reduce latency
            current_worker.set_handler(self)  # hand this handler to the worker so it can write messages back through it
            self.worker_ref = weakref.ref(current_worker)  # weak reference: the worker is reclaimed once no strong references remain
            # add_handler(fd, handler, events) registers a handler that receives events from fd.
            # fd is a file descriptor; events are the event types to watch: IOLoop.READ, IOLoop.WRITE or IOLoop.ERROR.
            # When one of the selected events fires, handler(fd, events) is invoked, i.e. worker(fd, events).
            # Here we listen for READ events.
self.loop.add_handler(current_worker.fd, current_worker, IOLoop.READ)
else:
self.close()
def on_message(self, message):
print("on_message", type(message), message)
worker = self.worker_ref()
worker.data_to_dst.append(message)
worker.on_write()
def on_close(self):
print("w on_close")
worker = self.worker_ref() if self.worker_ref else None
if worker:
worker.close()
class Worker(object):
def __init__(self, loop, ssh, chan, src_addr):
self.loop = loop
self.ssh = ssh
self.chan = chan
self.src_addr = src_addr
self.fd = chan.fileno()
        self.id = str(id(self))  # id() gives a unique identity value for this object
self.data_to_dst = []
self.handler = None
self.mode = IOLoop.READ
print('self.id >>>', self.id)
def __call__(self, fd, events):
if events & IOLoop.READ:
print(">>>>>>>>")
self.on_read()
def set_handler(self, handler):
print('what self >>>', handler) ## handler ==> <handler.WsockHandler object at 0x10bf6ea20>
if not self.handler:
print('handler NG')
self.handler = handler
def update_handler(self, mode):
print("mode>>>", mode, self.mode)
if self.mode != mode:
self.loop.update_handler(self.fd, mode)
self.mode = mode
def on_read(self):
        '''data coming from the server side'''
print("on_read")
try:
            data = self.chan.recv(BUF_SIZE)  # receive data from the channel; returns the bytes that were read
            print("received data >>>", data)
if not data:
self.close()
except (OSError, IOError) as e:
if errno_from_exception(e) in _ERRNO_CONNRESET:
self.close()
else:
try:
self.handler.write_message(data)
except tornado.websocket.WebSocketClosedError:
self.close()
def on_write(self):
print("on_write")
if not self.data_to_dst:
print("no data_to_dst")
return
data = ''.join(self.data_to_dst)
try:
sent = self.chan.send(data)
except (OSError, IOError) as e:
if errno_from_exception(e) in _ERRNO_CONNRESET:
self.close()
else:
self.update_handler(IOLoop.WRITE)
else:
self.data_to_dst = []
data = data[sent:]
if data:
self.data_to_dst.append(data)
self.update_handler(IOLoop.WRITE)
else:
self.update_handler(IOLoop.READ)
def close(self):
        print("bye")
if self.handler:
self.loop.remove_handler(self.fd)
self.handler.close()
self.chan.close()
self.ssh.close()
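# Editor's wiring sketch (not part of the original module): how these handlers are
# typically mounted in a Tornado Application. The route names, listen port and the
# presence of an 'index.html' template in the current directory are assumptions.
if __name__ == '__main__':
    app = tornado.web.Application(
        handlers=[
            (r'/', IndexHandler, dict(loop=IOLoop.current())),
            (r'/ws', WsockHandler, dict(loop=IOLoop.current())),
        ],
        template_path='.',
    )
    app.listen(8888)
    IOLoop.current().start()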
|
run.py
|
#!/usr/bin/python
from Adafruit_PWM_Servo_Driver import PWM
import time
import requests
import threading
pwm = PWM(0x40)
servoMin = 160 # Min pulse length out of 4096
servoMax = 600 # Max pulse length out of 4096
citation_min = 0
citation_max = 360
value = servoMin
current_value = servoMin
step = 10
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.setPWM(channel, 0, pulse)
def fetch_loop():
"""Fetch the most recent 5-minute bucket in a loop"""
global value
while True:
try:
print("Fetch")
# Get the second bucket, as the first one constantly changes.
history = requests.get("http://wikipedia.labs.crossref.org/status").json()['citation-history']
last_val = history[1]
print("History")
print(history)
per_hour = min(last_val * 12, servoMax)
proportion = (per_hour / float(citation_max))
value = proportion * (servoMax - servoMin) + servoMin
print("Last value: %d, per hour: %d, proportion: %f, value: %d" % (last_val, per_hour, proportion, value))
except Exception, e:
# If we get an error fetching ignore and try again next time. This will happen first time as the network is coming up.
print("Error %s" % e)
# Bucket is updated every 5 minutes. Fetch every minute to minimise aliasing.
time.sleep(60)
def run_background():
"""Run the background data fetch loop"""
thread = threading.Thread(target=fetch_loop)
thread.start()
def run():
"""Take the current value and gently converge on it"""
global value, current_value
pwm.setPWMFreq(60)
while (True):
diff = value - current_value
if diff > step:
print(str(current_value) + " DIFF " + str(diff))
current_value += step
elif diff < -step:
print(str(current_value) + " DIFF " + str(diff))
current_value -= step
pwm.setPWM(0, 0, current_value)
time.sleep(0.2)
run_background()
run()
|
Gpc.py
|
import re
from os import system, path
from threading import Thread
import pandas as pd
import numpy as np
from traceback import format_exc
from datetime import datetime
import pickle
from transformers import BertTokenizer, BertForSequenceClassification
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
PATH_TMP = './files_temp'
COL_TEXT_SEP = '__sys_gpc_text_sep__'
PAD_MAX_TOKENS = 25
BATCH_SIZE_AKA_MAX_ROWS_PER_GUESS_TO_FIT_GPU_MEM = int(4e3)
# BATCH_SIZE_AKA_MAX_ROWS_PER_GUESS_TO_FIT_GPU_MEM = int(1e3) # small batches
PRINT_EVERY_N = int(1e4)
# PRINT_EVERY_N = int(1) # print every time
MAX_TEST_ROWS = int(2e4) # TODO TEMP TEST DEBUG
path2here = '.'
dict_label_iid_pkl = f'{path2here}/model_save/dict_label_iid.pkl'
dict_label_t_pkl = f'{path2here}/model_save/dict_label_t.pkl'
dict_label_iid: dict = None
dict_label_t: dict = None
tokenizer: BertTokenizer = None
model: BertForSequenceClassification = None
# endregion globals
"""
originating from this tutorial - the test/eval inference part
http://mccormickml.com/2019/07/22/BERT-fine-tuning/
"""
# region cuda
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# TODO - find out how to use all GPUs
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
# endregion cuda
# region load model
def load_model_n_stuff():
print('ai_gpc loading model and tokenizer...')
global dict_label_iid, dict_label_t
global tokenizer, model
if path.exists(dict_label_iid_pkl):
with open(dict_label_iid_pkl, 'rb') as f:
dict_label_iid = pickle.load(f)
if path.exists(dict_label_t_pkl):
with open(dict_label_t_pkl, 'rb') as f:
dict_label_t = pickle.load(f)
tokenizer = BertTokenizer.from_pretrained(f'{path2here}/model_save')
model = BertForSequenceClassification.from_pretrained(f'{path2here}/model_save')
print('setting model to', device)
    model.to(device)  # move to GPU when available, otherwise stay on CPU
    model.eval()
load_model_n_stuff()
# endregion load model
r_dang_chars = re.compile(r'[{}"]+')  # "dang" presumably short for "dangerous" characters
# region M A I N MAIN function to be called from flask
def gpc(name: str = 'file_name_unique_without_extension', top_cat: int = -1, top_cat_t: str = None):
def fn(name2: str = 'file_name_unique_without_extension'):
try:
system(f'cd {PATH_TMP}; tar -zxvf {name2}.feather.tar.gz')
ppath = f'{PATH_TMP}/{name2}.feather'
df = pd.read_feather(ppath)
print(f'original len {len(df)} titles')
if len(df) > MAX_TEST_ROWS:
df = df.sample(n=MAX_TEST_ROWS)
print(f'doing inference on {len(df)} titles')
with Gpc(df, top_cat, top_cat_t) as obj:
df = obj.prepare_and_guess()
obj.dump('end gpc instance... - we should be DONE ... maybe')
print('end gpc static... - we should be DONE')
except:
err = format_exc()
print(err)
# async
t = Thread(target=fn, args=(name,))
t.start()
# TODO TEMP DEBUG
t.join()
# endregion M A I N MAIN function to be called from flask
# =============
# MAIN method in Gpc class is: prepare_and_guess()
# =============
class Gpc:
def __init__(self, df: pd.DataFrame = None, top_cat: int = -1, top_cat_t: str = None):
super().__init__()
self.df = df
self.top_cat = top_cat
self.top_cat_t = top_cat_t
self.column = COL_TEXT_SEP
self.input_ids_test = []
self.labels_test = []
self.attention_masks_test = []
self.texts_test = []
self.test_dataloader: DataLoader = None
self.d: datetime = datetime.now()
def __del__(self):
try:
del self.df
del self.input_ids_test
del self.labels_test
del self.attention_masks_test
del self.texts_test
del self.test_dataloader
except:
format_exc()
def __enter__(self):
return self
def __exit__(self, ttype, value, traceback):
self.__del__()
# =============
# MAIN
# =============
def prepare_and_guess(self) -> pd.DataFrame:
self.texts_test = self.df[self.column].tolist()
self.labels_test = [0] * len(self.texts_test) # dummy
self.input_ids_test, self.attention_masks_test, self.labels_test = self.encode_stuff()
test_dataset = TensorDataset(self.input_ids_test, self.attention_masks_test, self.labels_test)
self.test_dataloader = DataLoader(
test_dataset,
sampler=SequentialSampler(test_dataset),
# batch_size=len(test_dataset) # AKA - single batch - nope! no mem for that
batch_size=BATCH_SIZE_AKA_MAX_ROWS_PER_GUESS_TO_FIT_GPU_MEM,
# tests
num_workers=8,
# maybe this is the culprit as suggested by user12750353 in stackoverflow
# pin_memory=True
pin_memory=False
)
# =======
# call MAIN - that's what we are here for - the main GPU thing
# =======
# self.dump('start predictions...')
predictions = self.guess()
# self.dump('end predictions...')
print('pytorch tensor shape is', predictions.shape)
label_indices = torch.argmax(predictions, dim=1)
self.dump('start loop df append...')
df = []
for i, o in enumerate(self.texts_test):
# t, iid, s = self.find_with_top_level(predictions[i])
label_index = label_indices[i]
t = dict_label_t.get(label_index)
            # s = predictions[label_index]  # note: pulling a scalar from the GPU back to the CPU here is very slow, so it is skipped
# df.append(
# {
# 'text': o,
# 't': dict_label_t.get(label_index),
# 'iid': dict_label_iid.get(label_index),
# 's': predictions[label_index]
# }
# )
self.dump('end loop df append...')
self.dump('start df...')
df = pd.DataFrame(df)
self.dump('end df...')
return df
# GPU
def guess(self):
# =======
# MAIN - that's what we are here for - the main GPU thing
# =======
print()
print("that's what we are here for - the main GPU inference thing...")
print()
# predictions, true_labels = [], []
predictions = None
# torch.cuda.empty_cache()
for i, batch in enumerate(self.test_dataloader):
print()
self.dump('start empty cache...', i, 1)
# torch.cuda.empty_cache()
self.dump('end empty cache...', i, 1)
self.dump('start to device...', i, 1)
# region test shuffle
# if not i:
# batch = tuple(t.to(device) for t in batch) # to GPU when gpu (or CPU otherwise)
# else:
# for t in batch:
# t[...] = t[torch.randperm(t.shape[0], device=t.device)]
# endregion test shuffle
# region to device, where first batch is fast, next ones are slow
# there are just 3 tensors in each batch: input_ids, input_mask, labels
batch = tuple(t.to(device) for t in batch[:2]) # to GPU when gpu (or CPU otherwise)
# batch = list(t.to(device) for t in batch) # no real improvement
# batch = batch.to(device) # nope - this is just a list
self.dump('end to device...', i, 1)
# , b_labels - labels are not used
b_input_ids, b_input_mask = batch
self.dump('start outputs...', i, 1)
# torch.cuda.empty_cache()
with torch.no_grad():
# torch.cuda.empty_cache()
outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
self.dump('end outputs...', i, 1)
self.dump('logits...', i, 1)
logits = outputs[0]
self.dump('start detach...', i, 1)
logits = logits.detach()
self.dump('end detach...', i, 1)
del outputs
predictions = logits if predictions is None else torch.cat((predictions, logits), 0)
del logits
del b_input_ids
del b_input_mask
# del b_labels
for o in batch:
del o
del batch
return predictions
def find_with_top_level(self, predictions: torch.tensor) -> (str, int, float):
if self.top_cat < 0:
# # label_index = np.argmax(predictions)
# label_index = torch.argmax(predictions)
# return dict_label_t.get(label_index), dict_label_iid.get(label_index), predictions[label_index]
return dict_label_t.get(0), dict_label_iid.get(0), 0
t = None
iid = None
score = None
# # for label_index in np.argsort(predictions)[::-1]:
# for label_index in torch.argsort(predictions)[::-1]:
#
# t = dict_label_t.get(label_index)
#
# if self.top_cat_t in t:
# iid = dict_label_iid.get(label_index)
# score = predictions[label_index]
# break
# else:
# t = None
if not t:
t = self.top_cat_t
iid = self.top_cat
score = 0.
return t, iid, score
# just on CPU
def encode_stuff(self) -> (list, list, list):
# just on cpu - TODO - make on multiple cpu's
print('disregard this - this runs on CPU and should be distributed along multi CPUs')
print()
for i, sent in enumerate(self.texts_test):
if not i % PRINT_EVERY_N:
print(f'encode_stuff {i}')
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens=True, # Add '[CLS]' and '[SEP]'
max_length=PAD_MAX_TOKENS, # Pad & truncate all sentences. was 64
truncation=True,
padding='max_length',
return_attention_mask=True, # Construct attn. masks.
return_tensors='pt', # Return pytorch tensors.
)
self.input_ids_test.append(encoded_dict['input_ids'])
self.attention_masks_test.append(encoded_dict['attention_mask'])
return torch.cat(self.input_ids_test, dim=0), \
torch.cat(self.attention_masks_test, dim=0), \
torch.tensor(self.labels_test)
# --- measure and print times
def dump(self, pref='blah', i = -1, print_every_n=None):
if not print_every_n:
print_every_n = PRINT_EVERY_N
if i>-1 and not i % print_every_n:
print(pref, (datetime.now() - self.d).total_seconds())
self.d = datetime.now()
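# Editor's usage note (not part of the original module; the file name below is
# hypothetical): the Flask layer is expected to drop '<name>.feather.tar.gz' into
# PATH_TMP and then call gpc(), which unpacks the archive, runs BERT inference in
# a background thread and assembles a DataFrame of predicted labels, e.g.
#     gpc(name='products_batch_01', top_cat=-1)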
|
traffic_generator_client.py
|
#!/usr/bin/env python3
import os
import threading
from time import sleep
class TrafficGeneratorClient:
BLOCK_SIZE = 4096
PATH_FICHERO_OBSERVACIONES = '/home/observacion'
# Constructor
def __init__(self, block_size=BLOCK_SIZE):
self.tiempo_anterior = 0
self.block_size = block_size
        # Read the observations file
with open(self.PATH_FICHERO_OBSERVACIONES) as fichero_obs:
self.arr_obs = [line for line in fichero_obs]
def ejecuta_operacion(self, num_bytes, tipo_operacion, path_file):
block_of_bytes = bytes(self.block_size)
        # ls of the directory
# path_directory = path_file.rsplit('/', 1)[0]
# os.system("ls -l " + path_directory + " > /dev/null")
if tipo_operacion == "w":
bytes_left_to_write = num_bytes
with open(path_file, 'wb', 0) as myfile:
while bytes_left_to_write >= self.block_size or bytes_left_to_write > 0:
if bytes_left_to_write >= self.block_size:
myfile.write(block_of_bytes)
else:
myfile.write(bytes(bytes_left_to_write))
                    # Force the bytes to be written to disk
myfile.flush()
os.fsync(myfile.fileno())
bytes_left_to_write -= self.block_size
elif tipo_operacion == "r":
with open(path_file, 'rb', 0) as myfile:
bytes_left_to_read = num_bytes
while bytes_left_to_read >= self.block_size or bytes_left_to_read > 0:
if bytes_left_to_read >= self.block_size:
myfile.read(self.block_size)
else:
myfile.read(bytes_left_to_read)
bytes_left_to_read -= self.block_size
def start(self):
i = 0
while True:
            # Parse one observation, e.g. ('interarrival time' 'bytes' 'path' 'op type')
obs = self.arr_obs[i].split()
interarrival_time = float(obs[0])
num_bytes = int(obs[1])
tipo_op = obs[2]
path_file = obs[3]
            # Wait the interarrival time before firing the next operation
sleep(float(interarrival_time))
threading.Thread(target=self.ejecuta_operacion, args=(num_bytes, tipo_op, path_file)).start()
i += 1
            # Stop the program once the observations run out
if i == len(self.arr_obs):
break
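# Editor's usage sketch (not part of the original script): replay the observation
# trace once. Assumes /home/observacion exists and uses the whitespace-separated
# format parsed in start().
if __name__ == '__main__':
    client = TrafficGeneratorClient()
    client.start()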
|
process_task.py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: process_task for a task(ProcessMode)
Created: 2016/12/12
"""
import unittest
import time
import multiprocessing
import threading
import sys
import types
import warnings
from multiprocessing import Pipe
from pprint import pprint
import traceback
if sys.version.startswith('2'):
import exceptions
else:
from . import exceptions
def sleep_(num):
#pprint("~~~")
time.sleep(num)
#----------------------------------------------------------------------
def result_callback(result):
""""""
for i in result:
yield i
#----------------------------------------------------------------------
def testfun(num):
""""""
#print('UserFunc called!')
for i in range(6):
threading.Thread(target=sleep_, args=(num,)).start()
#print('SubProcess Called!')
time.sleep(0.4)
for i in range(5):
yield i
#pprint(threading.enumerate())
########################################################################
class ProcessTask(multiprocessing.Process):
""""""
#----------------------------------------------------------------------
def __init__(self, id, target, args=tuple(), kwargs={},
status_monitor_pipe=None, result_pipe=None,
result_hook_function=None,
threads_update_interval=0.0):
"""Constructor"""
multiprocessing.Process.__init__(self, name=id)
self._target = target
self.args = args
self.kwargs = kwargs
self._id = id
self._sub_threads_list = []
self._threads_update_interval = threads_update_interval
#
        # Build result
#
self._status_monitor_pipe = status_monitor_pipe
self._result_send_pipe = result_pipe
self._result_hook = result_hook_function
#self._init_timer()
#----------------------------------------------------------------------
def _init_timer(self):
""""""
self._threads_monitor = threading.Thread(name='update_subthreads_list',
target=self._deamon_check_threads)
self._threads_monitor.daemon = True
self._threads_monitor.start()
#----------------------------------------------------------------------
@property
def task_id(self):
""""""
return self._id
#----------------------------------------------------------------------
def run(self):
""""""
self._init_timer()
resultdict = {}
resultdict['state'] = False
resultdict['exception'] = ''
resultdict['result'] = ''
try:
#
# getting result and process result
#
result = self._target(*self.args, **self.kwargs)
if self._result_hook:
result = self._result_hook(result)
resultdict['state'] = True
#
# send back the result element
#
if isinstance(result, types.GeneratorType):
for i in result:
try:
resultdict['result'] = i
self._result_send_pipe.send(resultdict)
except Exception as e:
                        warnings.warn('[?] the result cannot be sent back!' + \
'\n Because : \n' + \
traceback.format_exc())
else:
try:
resultdict['result'] = result
self._result_send_pipe.send(resultdict)
except Exception as e:
                    warnings.warn('[?] the result cannot be sent back!' + \
'\n Because: \n' + \
traceback.format_exc())
except Exception as e:
resultdict['exception'] = traceback.format_exc()
self._result_send_pipe.send(resultdict)
#
# close result pipe
#
self._result_send_pipe.close()
#----------------------------------------------------------------------
def _enum_threads(self):
""""""
threads_list = threading.enumerate()
return threads_list
#----------------------------------------------------------------------
def _deamon_check_threads(self):
""""""
assert isinstance(self._threads_update_interval, (int, float))
while True:
#pprint('test')
self._sub_threads_list = None
self._sub_threads_list = self._enum_threads()
#print(len(self._sub_threads_list))
#print len(self._sub_threads_list)
threads_check_result = {}
threads_check_result['timestamp'] = time.time()
threads_check_result['from'] = self._id
for i in self._sub_threads_list:
threads_check_result[i.name] = i.is_alive()
#pprint(threads_check_result)
self._status_monitor_pipe.send(threads_check_result)
time.sleep(self._threads_update_interval)
##----------------------------------------------------------------------
#@property
#def subthreads_count(self):
#return len(self._sub_threads_list)
########################################################################
class ProcessTaskTest(unittest.case.TestCase):
""""""
#----------------------------------------------------------------------
def print_bar(self):
""""""
print(('-'*64))
#----------------------------------------------------------------------
def print_end_bar(self):
""""""
print(('-'*30 + 'END' + '-'*31))
#----------------------------------------------------------------------
def test_basic_usage(self):
""""""
pipp, pipc = Pipe()
pips, pipr = Pipe()
self.print_bar()
print('Test Task Interface')
ret_process = ProcessTask(id='test-1', target=testfun, args=(5,),
status_monitor_pipe=pipc,
result_pipe=pips,
result_hook_function=result_callback)
ret_process.start()
print('Test get threads status')
time.sleep(1)
#print(ret_process.subthreads_count)
threads_status = pipp.recv()
self.assertIsInstance(threads_status, dict)
#print pipr.recv()
#print pipr.recv()
#print pipr.recv()
#print pipr.recv()
self.print_end_bar()
if __name__ == '__main__':
unittest.main()
|
test_config.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test ``config`` command."""
import subprocess
import sys
from threading import Thread
from time import sleep
import pytest
from renku.cli import cli
from tests.utils import format_result_exception, retry_failed
def test_config_value_locally(client, runner, project, global_config_dir):
"""Check setting/getting from local configuration."""
result = runner.invoke(cli, ["config", "set", "key", "local-value"])
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "show", "key"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "local-value\n"
# Value set locally is not visible globally
result = runner.invoke(cli, ["config", "show", "key", "--global"])
assert 2 == result.exit_code
# Reading non-existing values is an error
result = runner.invoke(cli, ["config", "show", "non-existing"])
assert 2 == result.exit_code
def test_config_value_globally(client, runner, project, global_config_dir):
"""Check setting/getting from global configuration."""
result = runner.invoke(cli, ["config", "set", "key", "global-value", "--global"])
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "show", "key"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "global-value\n"
result = runner.invoke(cli, ["config", "show", "key", "--global"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "global-value\n"
# Value set globally is not visible in local config
result = runner.invoke(cli, ["config", "show", "key", "--local"])
assert 2 == result.exit_code
def test_config_default(client, runner, project, global_config_dir):
"""Check setting/getting from local configuration."""
result = runner.invoke(cli, ["config", "set", "lfs_threshold", "0b"])
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "set", "lfs_threshold", "10mb", "--global"])
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "show", "lfs_threshold"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "0b\n"
result = runner.invoke(cli, ["config", "show", "lfs_threshold", "--global"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "10mb\n"
result = runner.invoke(cli, ["config", "show", "lfs_threshold", "--default"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "100kb\n"
def test_config_get_non_existing_value(client, runner, project, global_config_dir):
"""Check getting non-existing value is an error."""
result = runner.invoke(cli, ["config", "show", "non-existing"])
assert 2 == result.exit_code
def test_local_overrides_global_config(client, runner, project, global_config_dir):
"""Test setting config both global and locally."""
result = runner.invoke(cli, ["config", "set", "key", "global-value", "--global"])
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "show", "key"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "global-value\n"
result = runner.invoke(cli, ["config", "set", "key", "local-value"])
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "show", "key"])
assert 0 == result.exit_code, format_result_exception(result)
assert result.output == "local-value\n"
@pytest.mark.parametrize("global_only", (False, True))
def test_config_remove_value_locally(client, runner, project, global_config_dir, global_only):
"""Check removing value from local configuration."""
param = ["--global"] if global_only else []
result = runner.invoke(cli, ["config", "set", "key", "some-value"] + param)
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "show", "key"] + param)
assert "some-value\n" == result.output
result = runner.invoke(cli, ["config", "remove", "key"] + param)
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "show", "key"] + param)
assert "some-value" not in result.output
def test_local_config_committed(client, runner, data_repository, global_config_dir):
"""Test local configuration update is committed only when it is changed."""
commit_sha_before = client.repo.head.object.hexsha
result = runner.invoke(cli, ["config", "set", "local-key", "value"])
assert 0 == result.exit_code, format_result_exception(result)
commit_sha_after = client.repo.head.object.hexsha
assert commit_sha_after != commit_sha_before
# Adding the same config should not create a new commit
commit_sha_before = client.repo.head.object.hexsha
result = runner.invoke(cli, ["config", "set", "local-key", "value"])
assert 0 == result.exit_code, format_result_exception(result)
commit_sha_after = client.repo.head.object.hexsha
assert commit_sha_after == commit_sha_before
# Adding a global config should not create a new commit
result = runner.invoke(cli, ["config", "set", "global-key", "value", "--global"])
assert 0 == result.exit_code, format_result_exception(result)
commit_sha_after = client.repo.head.object.hexsha
assert commit_sha_after == commit_sha_before
@pytest.mark.parametrize(
"args,message",
[
(
["show", "--local", "--global", "key"],
"Illegal usage: `local_only` is mutually exclusive with arguments `--default, --global`",
),
],
)
def test_invalid_command_args(client, runner, project, global_config_dir, args, message):
"""Test invalid combination of command-line arguments."""
result = runner.invoke(cli, ["config"] + args)
assert 2 == result.exit_code
assert message in result.output
@pytest.mark.parametrize("config_key", ["data_directory"])
def test_readonly_config(client, runner, project, config_key):
"""Test readonly config can only be set once."""
result = runner.invoke(cli, ["config", "set", config_key, "value"])
assert 0 == result.exit_code, format_result_exception(result)
result = runner.invoke(cli, ["config", "set", config_key, "value"])
assert 2 == result.exit_code
assert f"Configuration {config_key} cannot be modified." in result.output
result = runner.invoke(cli, ["config", "remove", config_key])
assert 2 == result.exit_code
assert f"Configuration {config_key} cannot be modified." in result.output
def test_config_read_concurrency(runner, project, client, run):
"""Test config can be read concurrently."""
result = runner.invoke(cli, ["config", "set", "test", "value"])
assert 0 == result.exit_code, format_result_exception(result)
command = [
"nice", # NOTE: Set low priority to increase chance of concurrency issues happening
"-n",
"19",
sys.executable,
"-m",
"renku.cli",
"config",
"show",
"test",
]
processes = []
for _ in range(20):
processes.append(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
assert all(p.wait() == 0 for p in processes)
assert all(p.stdout.read().decode("utf8") == "value\n" for p in processes)
@retry_failed
def test_config_write_concurrency(monkeypatch, runner, project, client, run):
    """Test config cannot be written concurrently; only one execution succeeds in that case."""
from renku.core.management.config import ConfigManagerMixin
REPETITIONS = 4
CONFIG_KEY = "write_key"
CONFIG_VALUE = "write_value"
# NOTE: monkey patch the _write_config private method to introduce a slowdown when writing to the file
with monkeypatch.context() as mp:
def _write_config(s, filepath, config):
with open(filepath, "w+") as file:
sleep(REPETITIONS + 1)
config.write(file)
mp.setattr(ConfigManagerMixin, "_write_config", _write_config)
def write_value(index):
result = runner.invoke(cli, ["config", "set", "--global", CONFIG_KEY, CONFIG_VALUE])
results[index] = result
def get_value():
result = runner.invoke(cli, ["config", "show", "--global", CONFIG_KEY])
return result.output if "not found" not in result.output else False
# NOTE: check the value was not previously set
assert not get_value()
threads = [None] * REPETITIONS
results = [None] * REPETITIONS
for i in range(REPETITIONS):
threads[i] = Thread(target=write_value, args=(i,))
threads[i].start()
sleep(1)
for i in range(REPETITIONS):
threads[i].join()
    # NOTE: verify all executions finish, some successfully and others not
KO = "Unable to acquire lock"
OK = "OK"
assert all(0 == r.exit_code for r in results)
assert any(KO in r.output for r in results)
assert any(OK in r.output for r in results)
    # NOTE: check that only one execution succeeded and all the others failed
def single_true(iterable):
i = iter(iterable)
return any(i) and not any(i)
assert single_true(OK in r.output for r in results)
assert all(KO in r.output or OK in r.output for r in results)
    # NOTE: check that the value was actually written
assert CONFIG_VALUE in get_value()
|
scan.py
|
###############################################################################
# Name : Scan - Image scanner
# Author : Alexander Parent
#
# Copyright 2020 BlackBerry Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import json
import sys
import CreateSpreadsheet
import utils
# TODO: Copy certs
# TODO: Fix configure / destroy
from multiprocessing import Process
sys.path.insert(1, '/plugins')
def runScanner(plugins, scanner, image):
plugins[scanner].scan(image)
with open('/config/config.json') as f:
config = json.load(f)
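# Editor's note (illustrative sketch, not taken from the project README): the keys
# read below imply /config/config.json looks roughly like
# {
#     "auths":    {"registry.example.com": {"auth": "..."}},
#     "scanners": ["scanner-plugin-name"],
#     "images":   ["registry/image:tag"],
#     "parallel": true
# }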
if 'auths' in config:
os.makedirs(os.path.dirname('/home/vagrant/.docker/config.json'), exist_ok=True)
with open('/home/vagrant/.docker/config.json', 'w') as f:
f.write(json.dumps({'auths': config['auths']}))
os.makedirs(os.path.dirname('/root/.docker/config.json'), exist_ok=True)
with open('/root/.docker/config.json', 'w') as f:
f.write(json.dumps({'auths': config['auths']}))
if 'scanners' not in config:
    print("Scanners array does not exist in config.json file. See README.")
    sys.exit(1)
if 'images' not in config:
    print("Images array does not exist in config.json file. See README.")
    sys.exit(1)
if len(config['scanners']) == 0:
print("Scanners list in config is empty.")
if len(config['images']) == 0:
print("Images list in config is empty.")
for image in config['images']:
os.system("docker pull " + image)
plugins = utils.loadPlugins()
# Scan images
processes = []
for scanner in config['scanners']:
if scanner not in plugins:
print("Cannot run scanner", scanner, "plugin not loaded.")
else:
os.makedirs(f"/results/{scanner}", exist_ok=True)
plugins[scanner].configure()
# Setup scanners
for image in config['images']:
if config['parallel']:
p = Process(target=runScanner, args=(plugins, scanner, image,))
processes.append(p)
p.start()
else:
plugins[scanner].scan(image)
if config['parallel']:
# Join all running scanners
for p in processes:
p.join()
# All scanners are done, destroy them
for scanner in config['scanners']:
plugins[scanner].destroy()
print("All scanners have finished")
CreateSpreadsheet.createSpreadsheet()
|
spanprocessor.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import threading
import typing
from opentelemetry.context import attach, detach, set_value
from opentelemetry.sdk.trace import Span, SpanProcessor
from opentelemetry.sdk.trace.export import SpanExporter
from opentelemetry.trace import INVALID_TRACE_ID
from opentelemetry.util import time_ns
logger = logging.getLogger(__name__)
class DatadogExportSpanProcessor(SpanProcessor):
"""Datadog exporter span processor
DatadogExportSpanProcessor is an implementation of `SpanProcessor` that
    batches all opened spans into a list per trace. When all spans for a trace
    have ended, the trace is queued up for export. This is required for exporting
    to the Datadog Agent, which expects to receive a list of spans for each trace.
"""
_FLUSH_TOKEN = INVALID_TRACE_ID
def __init__(
self,
span_exporter: SpanExporter,
schedule_delay_millis: float = 5000,
max_trace_size: int = 4096,
):
if max_trace_size <= 0:
            raise ValueError("max_trace_size must be a positive integer.")
if schedule_delay_millis <= 0:
raise ValueError("schedule_delay_millis must be positive.")
self.span_exporter = span_exporter
# queue trace_ids for traces with recently ended spans for worker thread to check
# for exporting
self.check_traces_queue = (
collections.deque()
) # type: typing.Deque[int]
self.traces_lock = threading.Lock()
# dictionary of trace_ids to a list of spans where the first span is the
# first opened span for the trace
self.traces = collections.defaultdict(list)
# counter to keep track of the number of spans and ended spans for a
# trace_id
self.traces_spans_count = collections.Counter()
self.traces_spans_ended_count = collections.Counter()
self.worker_thread = threading.Thread(target=self.worker, daemon=True)
# threading conditions used for flushing and shutdown
self.condition = threading.Condition(threading.Lock())
self.flush_condition = threading.Condition(threading.Lock())
# flag to indicate that there is a flush operation on progress
self._flushing = False
self.max_trace_size = max_trace_size
self._spans_dropped = False
self.schedule_delay_millis = schedule_delay_millis
self.done = False
self.worker_thread.start()
def on_start(self, span: Span) -> None:
ctx = span.get_context()
trace_id = ctx.trace_id
with self.traces_lock:
# check upper bound on number of spans for trace before adding new
# span
if self.traces_spans_count[trace_id] == self.max_trace_size:
logger.warning("Max spans for trace, spans will be dropped.")
self._spans_dropped = True
return
# add span to end of list for a trace and update the counter
self.traces[trace_id].append(span)
self.traces_spans_count[trace_id] += 1
def on_end(self, span: Span) -> None:
if self.done:
logger.warning("Already shutdown, dropping span.")
return
ctx = span.get_context()
trace_id = ctx.trace_id
with self.traces_lock:
self.traces_spans_ended_count[trace_id] += 1
if self.is_trace_exportable(trace_id):
self.check_traces_queue.appendleft(trace_id)
def worker(self):
timeout = self.schedule_delay_millis / 1e3
while not self.done:
if not self._flushing:
with self.condition:
self.condition.wait(timeout)
if not self.check_traces_queue:
# spurious notification, let's wait again
continue
if self.done:
# missing spans will be sent when calling flush
break
            # subtract the duration of this export call from the next timeout
start = time_ns()
self.export()
end = time_ns()
duration = (end - start) / 1e9
timeout = self.schedule_delay_millis / 1e3 - duration
# be sure that all spans are sent
self._drain_queue()
def is_trace_exportable(self, trace_id):
return (
self.traces_spans_count[trace_id]
- self.traces_spans_ended_count[trace_id]
<= 0
)
def export(self) -> None:
"""Exports traces with finished spans."""
notify_flush = False
export_trace_ids = []
while self.check_traces_queue:
trace_id = self.check_traces_queue.pop()
if trace_id is self._FLUSH_TOKEN:
notify_flush = True
else:
with self.traces_lock:
# check whether trace is exportable again in case that new
# spans were started since we last concluded trace was
# exportable
if self.is_trace_exportable(trace_id):
export_trace_ids.append(trace_id)
del self.traces_spans_count[trace_id]
del self.traces_spans_ended_count[trace_id]
if len(export_trace_ids) > 0:
token = attach(set_value("suppress_instrumentation", True))
for trace_id in export_trace_ids:
with self.traces_lock:
try:
# Ignore type b/c the Optional[None]+slicing is too "clever"
# for mypy
self.span_exporter.export(self.traces[trace_id]) # type: ignore
# pylint: disable=broad-except
except Exception:
logger.exception(
"Exception while exporting Span batch."
)
finally:
del self.traces[trace_id]
detach(token)
if notify_flush:
with self.flush_condition:
self.flush_condition.notify()
    def _drain_queue(self):
        """Export all elements until the queue is empty.
        Can only be called from the worker thread context because it invokes
        `export`, which is not thread safe.
"""
while self.check_traces_queue:
self.export()
def force_flush(self, timeout_millis: int = 30000) -> bool:
if self.done:
logger.warning("Already shutdown, ignoring call to force_flush().")
return True
self._flushing = True
self.check_traces_queue.appendleft(self._FLUSH_TOKEN)
# wake up worker thread
with self.condition:
self.condition.notify_all()
# wait for token to be processed
with self.flush_condition:
ret = self.flush_condition.wait(timeout_millis / 1e3)
self._flushing = False
if not ret:
logger.warning("Timeout was exceeded in force_flush().")
return ret
def shutdown(self) -> None:
# signal the worker thread to finish and then wait for it
self.done = True
with self.condition:
self.condition.notify_all()
self.worker_thread.join()
self.span_exporter.shutdown()
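# Editor's usage sketch (not part of the original module): wiring the processor
# into an SDK TracerProvider. ConsoleSpanExporter stands in for the Datadog
# exporter purely for illustration, and the SDK release is assumed to match the
# (pre-1.0) opentelemetry API this module was written against.
if __name__ == "__main__":
    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter

    provider = TracerProvider()
    provider.add_span_processor(
        DatadogExportSpanProcessor(ConsoleSpanExporter(), schedule_delay_millis=1000)
    )
    trace.set_tracer_provider(provider)

    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("parent-span"):
        with tracer.start_as_current_span("child-span"):
            pass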
|